lib/debugobjects.c


DEFINITIONS

This source file includes the following definitions.
  1. enable_object_debug
  2. disable_object_debug
  3. fill_pool
  4. lookup_object
  5. __alloc_object
  6. alloc_object
  7. free_obj_work
  8. __free_object
  9. free_object
  10. debug_objects_oom
  11. get_bucket
  12. debug_print_object
  13. debug_object_fixup
  14. debug_object_is_on_stack
  15. __debug_object_init
  16. debug_object_init
  17. debug_object_init_on_stack
  18. debug_object_activate
  19. debug_object_deactivate
  20. debug_object_destroy
  21. debug_object_free
  22. debug_object_assert_init
  23. debug_object_active_state
  24. __debug_check_no_obj_freed
  25. debug_check_no_obj_freed
  26. debug_stats_show
  27. debug_stats_open
  28. debug_objects_init_debugfs
  29. debug_objects_init_debugfs
  30. is_static_object
  31. fixup_init
  32. fixup_activate
  33. fixup_destroy
  34. fixup_free
  35. check_results
  36. debug_objects_selftest
  37. debug_objects_selftest
  38. debug_objects_early_init
  39. debug_objects_replace_static_objects
  40. debug_objects_mem_init

/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS        14
#define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE        1024
#define ODEBUG_POOL_MIN_LEVEL   256
#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_BATCH_SIZE       16

#define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation.
 * That caps freeing at roughly 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX    1024
#define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
        struct hlist_head       free_objs;
        int                     obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_used;
static int                      obj_pool_max_used;
static bool                     obj_freeing;
/* The number of objs on the global free list */
static int                      obj_nr_tofree;

static int                      debug_objects_maxchain __read_mostly;
static int __maybe_unused       debug_objects_maxchecked __read_mostly;
static int                      debug_objects_fixups __read_mostly;
static int                      debug_objects_warnings __read_mostly;
static int                      debug_objects_enabled __read_mostly
                                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int                      debug_objects_pool_size __read_mostly
                                = ODEBUG_POOL_SIZE;
static int                      debug_objects_pool_min_level __read_mostly
                                = ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr   *descr_test  __read_mostly;
static struct kmem_cache        *obj_cache __read_mostly;

/*
 * Track the number of kmem_cache_alloc()/free() calls done.
 */
static int                      debug_objects_allocated;
static int                      debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = 0;
        return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
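
/*
 * Illustrative note (not from the original file): with
 * CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=0, booting with "debug_objects"
 * on the kernel command line turns the tracker on, while
 * "no_debug_objects" forces it off regardless of the config default.
 */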

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE]             = "none",
        [ODEBUG_STATE_INIT]             = "initialized",
        [ODEBUG_STATE_INACTIVE]         = "inactive",
        [ODEBUG_STATE_ACTIVE]           = "active",
        [ODEBUG_STATE_DESTROYED]        = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
};

static void fill_pool(void)
{
        gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
        struct debug_obj *obj;
        unsigned long flags;

        if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
                return;

        /*
         * Reuse objs from the global free list; they will be reinitialized
         * when allocating.
         *
         * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
         * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
         * sections.
         */
        while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
                raw_spin_lock_irqsave(&pool_lock, flags);
                /*
                 * Recheck with the lock held as the worker thread might have
                 * won the race and freed the global free list already.
                 */
                while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
                        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                        hlist_del(&obj->node);
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
                        hlist_add_head(&obj->node, &obj_pool);
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }

        if (unlikely(!obj_cache))
                return;

        while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
                struct debug_obj *new[ODEBUG_BATCH_SIZE];
                int cnt;

                for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
                        new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
                        if (!new[cnt])
                                break;
                }
                if (!cnt)
                        return;

                raw_spin_lock_irqsave(&pool_lock, flags);
                while (cnt) {
                        hlist_add_head(&new[--cnt]->node, &obj_pool);
                        debug_objects_allocated++;
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
        struct debug_obj *obj = NULL;

        if (list->first) {
                obj = hlist_entry(list->first, typeof(*obj), node);
                hlist_del(&obj->node);
        }

        return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
        struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        struct debug_obj *obj;

        if (likely(obj_cache)) {
                obj = __alloc_object(&percpu_pool->free_objs);
                if (obj) {
                        percpu_pool->obj_free--;
                        goto init_obj;
                }
        }

        raw_spin_lock(&pool_lock);
        obj = __alloc_object(&obj_pool);
        if (obj) {
                obj_pool_used++;
                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

                /*
                 * Looking ahead, allocate one batch of debug objects and
                 * put them into the percpu free pool.
                 */
                if (likely(obj_cache)) {
                        int i;

                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                struct debug_obj *obj2;

                                obj2 = __alloc_object(&obj_pool);
                                if (!obj2)
                                        break;
                                hlist_add_head(&obj2->node,
                                               &percpu_pool->free_objs);
                                percpu_pool->obj_free++;
                                obj_pool_used++;
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                        }
                }

                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        raw_spin_unlock(&pool_lock);

init_obj:
        if (obj) {
                obj->object = addr;
                obj->descr  = descr;
                obj->state  = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_add_head(&obj->node, &b->list);
        }
        return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;
        HLIST_HEAD(tofree);

        WRITE_ONCE(obj_freeing, false);
        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;

        if (obj_pool_free >= debug_objects_pool_size)
                goto free_objs;

        /*
         * The objs on the pool list might be allocated before the work is
         * run, so recheck whether the pool list is full; if not, refill it
         * from the global free list. As it is likely that a workload may be
         * gearing up to use more and more objects, don't free any of them
         * until the next round.
         */
        while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
                obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &obj_pool);
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        return;

free_objs:
        /*
         * Pool list is already full and there are still objs on the free
         * list. Move remaining free objs to a temporary list to free the
         * memory outside the pool_lock held region.
         */
        if (obj_nr_tofree) {
                hlist_move_list(&obj_to_free, &tofree);
                debug_objects_freed += obj_nr_tofree;
                WRITE_ONCE(obj_nr_tofree, 0);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
}

static void __free_object(struct debug_obj *obj)
{
        struct debug_obj *objs[ODEBUG_BATCH_SIZE];
        struct debug_percpu_free *percpu_pool;
        int lookahead_count = 0;
        unsigned long flags;
        bool work;

        local_irq_save(flags);
        if (!obj_cache)
                goto free_to_obj_pool;

        /*
         * Try to free it into the percpu pool first.
         */
        percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
                hlist_add_head(&obj->node, &percpu_pool->free_objs);
                percpu_pool->obj_free++;
                local_irq_restore(flags);
                return;
        }

        /*
         * As the percpu pool is full, look ahead and pull out a batch
         * of objects from the percpu pool and free them as well.
         */
        for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
                objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
                if (!objs[lookahead_count])
                        break;
                percpu_pool->obj_free--;
        }

free_to_obj_pool:
        raw_spin_lock(&pool_lock);
        work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
               (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
        obj_pool_used--;

        if (work) {
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                hlist_add_head(&obj->node, &obj_to_free);
                if (lookahead_count) {
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_to_free);
                        }
                }

                if ((obj_pool_free > debug_objects_pool_size) &&
                    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
                        int i;

                        /*
                         * Free one more batch of objects from obj_pool.
                         */
                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                obj = __alloc_object(&obj_pool);
                                hlist_add_head(&obj->node, &obj_to_free);
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                        }
                }
        } else {
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                hlist_add_head(&obj->node, &obj_pool);
                if (lookahead_count) {
                        WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_pool);
                        }
                }
        }
        raw_spin_unlock(&pool_lock);
        local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        __free_object(obj);
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        pr_warn("Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_move_list(&db->list, &freelist);
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
        }
}

/*
 * We hash on the page-sized chunk of the address. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}
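
/*
 * Illustrative example (not from the original file): with 4K pages
 * ODEBUG_CHUNK_SHIFT is 12, so the addresses 0xffff888012345678 and
 * 0xffff888012345abc share chunk number 0xffff888012345 and always hash
 * to the same bucket, which is what makes the freed-memory scan in
 * __debug_check_no_obj_freed() cheap.
 */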

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        struct debug_obj_descr *descr = obj->descr;
        static int limit;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
                   void *addr, enum debug_obj_state state)
{
        if (fixup && fixup(addr, state)) {
                debug_objects_fixups++;
                return true;
        }
        return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
                         task_stack_page(current));
        else
                pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
                         task_stack_page(current));

        WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        bool check_stack = false;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                obj = alloc_object(addr, db, descr);
                if (!obj) {
                        debug_objects_enabled = 0;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_objects_oom();
                        return;
                }
                check_stack = true;
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                return;
        default:
                break;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (check_stack)
                debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
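
/*
 * Illustrative usage sketch (not part of the original file; the "foo"
 * names are hypothetical): a subsystem declares a descriptor and calls
 * the debug_object_*() hooks from its own lifetime paths:
 *
 *      static struct debug_obj_descr foo_debug_descr = {
 *              .name   = "foo",
 *      };
 *
 *      void foo_init_obj(struct foo *f)
 *      {
 *              debug_object_init(f, &foo_debug_descr);
 *      }
 *
 *      void foo_start(struct foo *f)
 *      {
 *              debug_object_activate(f, &foo_debug_descr);
 *      }
 *
 *      void foo_teardown(struct foo *f)
 *      {
 *              debug_object_deactivate(f, &foo_debug_descr);
 *              debug_object_free(f, &foo_debug_descr);
 *      }
 */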

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *                              initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int ret;
        struct debug_obj o = { .object = addr,
                               .state = ODEBUG_STATE_NOTAVAILABLE,
                               .descr = descr };

        if (!debug_objects_enabled)
                return 0;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                bool print_object = false;

                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        ret = 0;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_print_object(obj, "activate");
                        ret = debug_object_fixup(descr->fixup_activate, addr, state);
                        return ret ? 0 : -EINVAL;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        ret = -EINVAL;
                        break;
                default:
                        ret = 0;
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
                if (print_object)
                        debug_print_object(obj, "activate");
                return ret;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);

        /*
         * We are here when a static object is activated. We let the
         * type-specific code confirm whether this is really a static
         * object. If so, we just make sure that the static object is
         * tracked in the object tracker. If not, this must be a bug,
         * so we try to fix it up.
         */
        if (descr->is_static_object && descr->is_static_object(addr)) {
                /* track this static object */
                debug_object_init(addr, descr);
                debug_object_activate(addr, descr);
        } else {
                debug_print_object(&o, "activate");
                ret = debug_object_fixup(descr->fixup_activate, addr,
                                        ODEBUG_STATE_NOTAVAILABLE);
                return ret ? 0 : -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
                                print_object = true;
                        break;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        break;
                default:
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        } else if (print_object) {
                debug_print_object(obj, "deactivate");
        }
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "destroy");
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                print_object = true;
                break;
        default:
                break;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (print_object)
                debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "free");
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should already be
 *                            initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                raw_spin_unlock_irqrestore(&db->lock, flags);
                /*
                 * The object may be static; let the type-specific code
                 * confirm. If it is, track it; otherwise invoke the fixup.
                 */
                if (descr->is_static_object && descr->is_static_object(addr)) {
                        /* Track this static object */
                        debug_object_init(addr, descr);
                } else {
                        debug_print_object(&o, "assert_init");
                        debug_object_fixup(descr->fixup_assert_init, addr,
                                           ODEBUG_STATE_NOTAVAILABLE);
                }
                return;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * @expect:     expected state
 * @next:       state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
                                print_object = true;
                        break;

                default:
                        print_object = true;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "active_state");
        } else if (print_object) {
                debug_print_object(obj, "active_state");
        }
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
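
/*
 * Illustrative example (not from the original file; the FOO_* names are
 * hypothetical): a caller drives an extra state machine on top of an
 * ACTIVE object:
 *
 *      debug_object_active_state(f, &foo_debug_descr,
 *                                FOO_ASTATE_IDLE, FOO_ASTATE_QUEUED);
 *
 * This succeeds silently when the tracked astate equals FOO_ASTATE_IDLE
 * and advances it to FOO_ASTATE_QUEUED; any other astate, or a
 * non-active object, triggers a warning. RCU uses this scheme to catch
 * rcu_head callbacks that are queued twice.
 */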

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        int cnt, objs_checked = 0;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;
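
        /*
         * Worked example (illustrative, not from the original file): with
         * 4K chunks, freeing size 0x30 at saddr 0x10ff0 gives eaddr
         * 0x11020 and paddr 0x10000, so chunks = 2 and the buckets of
         * both affected pages are scanned below.
         */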

        for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_print_object(obj, "free");
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                __free_object(obj);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;

                objs_checked += cnt;
        }

        if (objs_checked > debug_objects_maxchecked)
                debug_objects_maxchecked = objs_checked;

        /* Schedule work to actually kmem_cache_free() objects */
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        int cpu, obj_percpu_free = 0;

        for_each_possible_cpu(cpu)
                obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

        seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
        seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
        seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
        seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
        .open           = debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);

        debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

        return 0;
}
__initcall(debug_objects_init_debugfs);
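
/*
 * Illustrative (not from the original file): assuming debugfs is
 * mounted at /sys/kernel/debug, the counters above can be read with
 * "cat /sys/kernel/debug/debug_objects/stats".
 */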

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long   dummy1[6];
        int             static_init;
        unsigned long   dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
        struct self_test *obj = addr;

        return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                return true;
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return true;

        default:
                return false;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                       obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                       fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                       warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
        .name                   = "selftest",
        .is_static_object       = is_static_object,
        .fixup_init             = fixup_init,
        .fixup_activate         = fixup_activate,
        .fixup_destroy          = fixup_destroy,
        .fixup_free             = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        pr_info("selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;

        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
                hlist_add_head(&obj->node, &objects);
        }

        /*
         * debug_objects_mem_init() is now called early, while only one
         * CPU is up and interrupts are disabled, so it is safe to
         * replace the active object references.
         */

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                        cnt++;
                }
        }

        pr_debug("%d of %d active objects replaced\n",
                 cnt, obj_pool_used);
        return 0;
free:
        hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, which would recurse.
 */
void __init debug_objects_mem_init(void)
{
        int cpu, extras;

        if (!debug_objects_enabled)
                return;

        /*
         * Initialize the percpu object pools
         *
         * Initialization is not strictly necessary, but was done for
         * completeness.
         */
        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof(struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
                                      NULL);

        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
                kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
        } else
                debug_objects_selftest();

        /*
         * Increase the thresholds for allocating and freeing objects
         * according to the number of possible CPUs available in the system.
         */
        extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
        debug_objects_pool_size += extras;
        debug_objects_pool_min_level += extras;
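
        /*
         * Illustrative: on a machine with 8 possible CPUs, extras is
         * 8 * ODEBUG_BATCH_SIZE = 128, so the pool target grows from
         * 1024 to 1152 and the refill watermark from 256 to 384.
         */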
}
