root/mm/debug.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. __dump_page
  2. dump_page
  3. dump_vma
  4. dump_mm
  5. setup_vm_debug
  6. page_init_poison

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * mm/debug.c
   4  *
   5  * mm/ specific debug routines.
   6  *
   7  */
   8 
   9 #include <linux/kernel.h>
  10 #include <linux/mm.h>
  11 #include <linux/trace_events.h>
  12 #include <linux/memcontrol.h>
  13 #include <trace/events/mmflags.h>
  14 #include <linux/migrate.h>
  15 #include <linux/page_owner.h>
  16 #include <linux/ctype.h>
  17 
  18 #include "internal.h"
  19 
/*
 * Human-readable names for page migration reasons, indexed by
 * enum migrate_reason (see <linux/migrate.h>).  The entry order must
 * track the enum definition — TODO(review): confirm against the enum
 * when changing either side.
 */
const char *migrate_reason_names[MR_TYPES] = {
        "compaction",
        "memory_failure",
        "memory_hotplug",
        "syscall_or_cpuset",
        "mempolicy_mbind",
        "numa_misplaced",
        "cma",
};
  29 
/*
 * Flag-name table used to decode page->flags (e.g. via the %pGp printk
 * extension, see __dump_page() below).  Terminated by a {0, NULL}
 * sentinel entry.
 */
const struct trace_print_flags pageflag_names[] = {
        __def_pageflag_names,
        {0, NULL}
};
  34 
/*
 * Flag-name table for GFP allocation flags, {0, NULL}-terminated.
 * NOTE(review): presumably consumed by the %pGg printk extension /
 * tracing — no user is visible in this file.
 */
const struct trace_print_flags gfpflag_names[] = {
        __def_gfpflag_names,
        {0, NULL}
};
  39 
/*
 * Flag-name table used to decode vma->vm_flags (e.g. via the %pGv
 * printk extension, see dump_vma() below).  Terminated by a {0, NULL}
 * sentinel entry.
 */
const struct trace_print_flags vmaflag_names[] = {
        __def_vmaflag_names,
        {0, NULL}
};
  44 
/*
 * __dump_page - print the state of a struct page for debugging
 * @page:   page to dump
 * @reason: optional human-readable explanation appended to the dump
 *          (may be NULL)
 *
 * Prints refcount, mapcount, mapping and flags information followed by
 * a raw hex dump of the struct page itself.  Safe to call on a poisoned
 * (not yet initialized) struct page: in that case only the hex dump is
 * emitted, since Page*() accessors would themselves detect the poison
 * and recurse back into dump_page().
 */
void __dump_page(struct page *page, const char *reason)
{
        struct address_space *mapping;
        bool page_poisoned = PagePoisoned(page);
        int mapcount;
        char *type = "";

        /*
         * If struct page is poisoned don't access Page*() functions as that
         * leads to recursive loop. Page*() check for poisoned pages, and calls
         * dump_page() when detected.
         */
        if (page_poisoned) {
                pr_warn("page:%px is uninitialized and poisoned", page);
                goto hex_only;
        }

        mapping = page_mapping(page);

        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
        mapcount = PageSlab(page) ? 0 : page_mapcount(page);

        /* Compound pages additionally carry a compound_mapcount. */
        if (PageCompound(page))
                pr_warn("page:%px refcount:%d mapcount:%d mapping:%px "
                        "index:%#lx compound_mapcount: %d\n",
                        page, page_ref_count(page), mapcount,
                        page->mapping, page_to_pgoff(page),
                        compound_mapcount(page));
        else
                pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx\n",
                        page, page_ref_count(page), mapcount,
                        page->mapping, page_to_pgoff(page));
        /* Classify the page so the flags line below gets a prefix. */
        if (PageKsm(page))
                type = "ksm ";
        else if (PageAnon(page))
                type = "anon ";
        else if (mapping) {
                /*
                 * For file-backed pages, try to also print the name of the
                 * first dentry aliasing the inode.
                 * NOTE(review): i_dentry is walked without any locking here;
                 * presumably acceptable for a best-effort debug dump — verify.
                 */
                if (mapping->host && mapping->host->i_dentry.first) {
                        struct dentry *dentry;
                        dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
                        pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
                } else
                        pr_warn("%ps\n", mapping->a_ops);
        }
        /* Catch pageflag_names falling out of sync with the page flags. */
        BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

        pr_warn("%sflags: %#lx(%pGp)\n", type, page->flags, &page->flags);

hex_only:
        /* Raw dump of the struct page itself, one word at a time. */
        print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);

        if (reason)
                pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
        /* mem_cgroup is only safe to read when the page isn't poisoned. */
        if (!page_poisoned && page->mem_cgroup)
                pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
 110 
/*
 * dump_page - dump a struct page plus its page_owner information
 * @page:   page to dump
 * @reason: optional explanation string passed through to __dump_page()
 *          (may be NULL)
 *
 * Convenience wrapper: prints the page state via __dump_page(), then
 * the page_owner record (allocation origin) for the page.
 */
void dump_page(struct page *page, const char *reason)
{
        __dump_page(page, reason);
        dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
 117 
 118 #ifdef CONFIG_DEBUG_VM
 119 
/*
 * dump_vma - print the fields of a vm_area_struct for debugging
 * @vma: the VMA to dump
 *
 * Emits a single pr_emerg() covering addresses, linkage (next/prev/mm),
 * protection, backing file/offset and decoded vm_flags (%pGv uses
 * vmaflag_names above).  The argument list must stay in the exact order
 * of the format string.
 */
void dump_vma(const struct vm_area_struct *vma)
{
        pr_emerg("vma %px start %px end %px\n"
                "next %px prev %px mm %px\n"
                "prot %lx anon_vma %px vm_ops %px\n"
                "pgoff %lx file %px private_data %px\n"
                "flags: %#lx(%pGv)\n",
                vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
                vma->vm_prev, vma->vm_mm,
                (unsigned long)pgprot_val(vma->vm_page_prot),
                vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
                vma->vm_file, vma->vm_private_data,
                vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
 135 
/*
 * dump_mm - print the fields of an mm_struct for debugging
 * @mm: the mm to dump
 *
 * One large pr_emerg() whose format string and argument list are both
 * interleaved with the same set of #ifdefs (CONFIG_MMU, CONFIG_AIO,
 * CONFIG_MEMCG, CONFIG_MMU_NOTIFIER, CONFIG_NUMA_BALANCING).  When
 * editing, every conditional format fragment must keep its matching
 * conditional argument(s) in the same order, or the varargs will be
 * consumed against the wrong specifiers.
 */
void dump_mm(const struct mm_struct *mm)
{
        pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
                "get_unmapped_area %px\n"
#endif
                "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
                "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
                "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
                "binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
                "ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
                "owner %px "
#endif
                "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
                "mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",

                mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
                mm->get_unmapped_area,
#endif
                mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
                mm->pgd, atomic_read(&mm->mm_users),
                atomic_read(&mm->mm_count),
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
                (u64)atomic64_read(&mm->pinned_vm),
                mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
                mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
                mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
                mm->owner,
#endif
                mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
                mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
                atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
}
 199 
 200 static bool page_init_poisoning __read_mostly = true;
 201 
 202 static int __init setup_vm_debug(char *str)
 203 {
 204         bool __page_init_poisoning = true;
 205 
 206         /*
 207          * Calling vm_debug with no arguments is equivalent to requesting
 208          * to enable all debugging options we can control.
 209          */
 210         if (*str++ != '=' || !*str)
 211                 goto out;
 212 
 213         __page_init_poisoning = false;
 214         if (*str == '-')
 215                 goto out;
 216 
 217         while (*str) {
 218                 switch (tolower(*str)) {
 219                 case'p':
 220                         __page_init_poisoning = true;
 221                         break;
 222                 default:
 223                         pr_err("vm_debug option '%c' unknown. skipped\n",
 224                                *str);
 225                 }
 226 
 227                 str++;
 228         }
 229 out:
 230         if (page_init_poisoning && !__page_init_poisoning)
 231                 pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");
 232 
 233         page_init_poisoning = __page_init_poisoning;
 234 
 235         return 1;
 236 }
 237 __setup("vm_debug", setup_vm_debug);
 238 
 239 void page_init_poison(struct page *page, size_t size)
 240 {
 241         if (page_init_poisoning)
 242                 memset(page, PAGE_POISON_PATTERN, size);
 243 }
 244 EXPORT_SYMBOL_GPL(page_init_poison);
 245 #endif          /* CONFIG_DEBUG_VM */

/* [<][>][^][v][top][bottom][index][help] */