/* linux/mm/debug.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * mm/debug.c
   4 *
   5 * mm/ specific debug routines.
   6 *
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/mm.h>
  11#include <linux/trace_events.h>
  12#include <linux/memcontrol.h>
  13#include <trace/events/mmflags.h>
  14#include <linux/migrate.h>
  15#include <linux/page_owner.h>
  16
  17#include "internal.h"
  18
/*
 * Human-readable labels for enum migrate_reason (<linux/migrate.h>),
 * printed by the mm_migrate_pages tracepoint.  The array is indexed by
 * the enum value, so entries must appear in exactly the enum's order
 * and the list must stay in sync whenever a reason is added or renamed.
 *
 * NOTE(review): MR_CMA was renamed MR_CONTIG_RANGE in v4.16 upstream;
 * if this tree carries that rename, the "cma" entry below should read
 * "contig_range" -- verify against the enum definition.
 */
char *migrate_reason_names[MR_TYPES] = {
        "compaction",
        "memory_failure",
        "memory_hotplug",
        "syscall_or_cpuset",
        "mempolicy_mbind",
        "numa_misplaced",
        "cma",
};
  28
/*
 * Page-flag name table generated from <trace/events/mmflags.h>,
 * terminated by a {0, NULL} sentinel.  __dump_page() asserts (via
 * BUILD_BUG_ON) that it covers every flag in __NR_PAGEFLAGS.
 */
const struct trace_print_flags pageflag_names[] = {
        __def_pageflag_names,
        {0, NULL}
};
  33
/*
 * GFP-flag name table generated from <trace/events/mmflags.h>,
 * terminated by a {0, NULL} sentinel.
 */
const struct trace_print_flags gfpflag_names[] = {
        __def_gfpflag_names,
        {0, NULL}
};
  38
/*
 * VMA-flag name table generated from <trace/events/mmflags.h>,
 * terminated by a {0, NULL} sentinel.
 */
const struct trace_print_flags vmaflag_names[] = {
        __def_vmaflag_names,
        {0, NULL}
};
  43
/*
 * __dump_page() - print the state of a struct page to the console.
 * @page:   the page to describe; must point at a valid struct page.
 * @reason: optional explanation of why the page is dumped, or NULL.
 *
 * Emits the refcount, mapcount, mapping, index, decoded flags and a raw
 * hex dump of the struct page itself at emergency/alert log level, so
 * the output survives console loglevel filtering.  The pr_emerg/pr_cont
 * sequence below builds single log lines incrementally -- the call
 * order is part of the output format.
 */
void __dump_page(struct page *page, const char *reason)
{
        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
        int mapcount = PageSlab(page) ? 0 : page_mapcount(page);

        pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
                  page, page_ref_count(page), mapcount,
                  page->mapping, page_to_pgoff(page));
        if (PageCompound(page))
                pr_cont(" compound_mapcount: %d", compound_mapcount(page));
        pr_cont("\n");
        /* Catch pageflag_names[] drifting out of sync with the flag list */
        BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

        pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);

        /* Raw dump of the whole struct page, grouped as unsigned longs */
        print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);

        if (reason)
                pr_alert("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
        if (page->mem_cgroup)
                pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
  75
/*
 * dump_page() - dump a page's state plus its page_owner information.
 * @page:   the page to describe.
 * @reason: optional explanation for the dump, or NULL.
 *
 * The basic state is printed first so it appears even when
 * dump_page_owner() has nothing to add (e.g. page_owner disabled).
 */
void dump_page(struct page *page, const char *reason)
{
        __dump_page(page, reason);
        dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
  82
  83#ifdef CONFIG_DEBUG_VM
  84
/*
 * dump_vma() - print the fields of a vm_area_struct at emergency level.
 * @vma: the VMA to describe.
 *
 * Single printk so the whole record shares one log entry; the format
 * string and the argument list below are in strict positional
 * correspondence and must be changed together.
 */
void dump_vma(const struct vm_area_struct *vma)
{
        pr_emerg("vma %px start %px end %px\n"
                "next %px prev %px mm %px\n"
                "prot %lx anon_vma %px vm_ops %px\n"
                "pgoff %lx file %px private_data %px\n"
                "flags: %#lx(%pGv)\n",
                vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
                vma->vm_prev, vma->vm_mm,
                (unsigned long)pgprot_val(vma->vm_page_prot),
                vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
                vma->vm_file, vma->vm_private_data,
                vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
 100
/*
 * dump_mm() - print the fields of an mm_struct at emergency level.
 * @mm: the mm to describe.
 *
 * One printk covering the whole struct.  Every #ifdef block in the
 * format string has a matching #ifdef block at the same position in
 * the argument list -- the two must be edited in lockstep or the
 * varargs will shift and print garbage.
 */
void dump_mm(const struct mm_struct *mm)
{
        pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU
                "get_unmapped_area %px\n"
#endif
                "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
                "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
                "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
                "binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
                "ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
                "owner %px "
#endif
                "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
                "mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",

                mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
                mm->get_unmapped_area,
#endif
                mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
                mm->pgd, atomic_read(&mm->mm_users),
                atomic_read(&mm->mm_count),
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
                mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
                mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
                mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
                mm->owner,
#endif
                mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
                mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
                atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
}
 163
 164#endif          /* CONFIG_DEBUG_VM */
 165