// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

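/*
 * Human-readable names for enum migrate_reason, in enum order.  Keep
 * this table in sync with the MR_* values in include/linux/migrate.h.
 */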
const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

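/*
 * NULL-terminated {flag, name} tables consumed by the %pGp, %pGg and
 * %pGv printk format extensions in lib/vsprintf.c.
 */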
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

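/*
 * Dump the state of one struct page.  Takes care not to trip over
 * invalid state: a poisoned page is dumped as raw hex only, and
 * page_mapcount() is skipped for slab pages, which reuse the
 * _mapcount field for their own bookkeeping.
 */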
void __dump_page(struct page *page, const char *reason)
{
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	int mapcount;

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	mapping = page_mapping(page);

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_warn("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
		page, page_ref_count(page), mapcount,
		page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	if (PageAnon(page))
		pr_warn("anon ");
	else if (PageKsm(page))
		pr_warn("ksm ");
	else if (mapping) {
		pr_warn("%ps ", mapping->a_ops);
		if (mapping->host->i_dentry.first) {
			struct dentry *dentry;

			dentry = container_of(mapping->host->i_dentry.first,
					      struct dentry, d_u.d_alias);
			pr_warn("name:\"%pd\" ", dentry);
		}
	}
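	/*
	 * Every page flag must appear in pageflag_names exactly once,
	 * followed by its {0, NULL} terminator.
	 */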
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);

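	/* The raw hex dump is safe even when the struct page is poisoned. */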
hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

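/*
 * dump_page() - __dump_page() plus the page_owner allocation trace,
 * when page_owner is enabled.  This is what VM_BUG_ON_PAGE() ends up
 * calling.
 */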
void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

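/*
 * Dump the fields of one vm_area_struct; the VM_* flags are decoded
 * by the %pGv format extension using vmaflag_names above.
 */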
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

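/*
 * Dump the fields of one mm_struct.  The format string and the
 * argument list below are split by the same set of config #ifdefs
 * and must stay in sync.
 */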
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

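/* Poison freshly initialized struct pages by default; see "vm_debug". */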
static bool page_init_poisoning __read_mostly = true;

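/*
 * Parse the "vm_debug" kernel command line option.  Bare "vm_debug"
 * enables every option we control, "vm_debug=-" disables them all,
 * and "vm_debug=p" enables struct page init-poisoning (currently the
 * only option).
 */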
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

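/*
 * Fill a range of struct pages with PAGE_POISON_PATTERN so that any
 * use of an uninitialized struct page is caught by PagePoisoned();
 * a no-op when poisoning was disabled on the command line.
 */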
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
#endif /* CONFIG_DEBUG_VM */