1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Page Attribute Table (PAT) support: handle memory caching attributes in page tables. |
4 | * |
5 | * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> |
6 | * Suresh B Siddha <suresh.b.siddha@intel.com> |
7 | * |
8 | * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. |
9 | * |
10 | * Basic principles: |
11 | * |
12 | * PAT is a CPU feature supported by all modern x86 CPUs, to allow the firmware and |
13 | * the kernel to set one of a handful of 'caching type' attributes for physical |
14 | * memory ranges: uncached, write-combining, write-through, write-protected, |
15 | * and the most commonly used and default attribute: write-back caching. |
16 | * |
17 | * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is |
18 | * a hardware interface to enumerate a limited number of physical memory ranges |
19 | * and set their caching attributes explicitly, programmed into the CPU via MSRs. |
20 | * Even modern CPUs have MTRRs enabled - but these are typically not touched |
 * by the kernel or by user-space (such as the X server); we rely on PAT for any
22 | * additional cache attribute logic. |
23 | * |
24 | * PAT doesn't work via explicit memory ranges, but uses page table entries to add |
25 | * cache attribute information to the mapped memory range: there's 3 bits used, |
26 | * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), with the 8 possible values mapped by the |
27 | * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT). |
28 | * |
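 * ( As an illustrative example: a PTE with _PAGE_PAT=1, _PAGE_PCD=0 and
 *   _PAGE_PWT=1 selects PAT slot (1 << 2) | (0 << 1) | 1 == 5, i.e. byte 5
 *   of MSR_IA32_CR_PAT supplies that mapping's caching type. )
 *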
29 | * ( There's a metric ton of finer details, such as compatibility with CPU quirks |
30 | * that only support 4 types of PAT entries, and interaction with MTRRs, see |
31 | * below for details. ) |
32 | */ |
33 | |
34 | #include <linux/seq_file.h> |
35 | #include <linux/memblock.h> |
36 | #include <linux/debugfs.h> |
37 | #include <linux/ioport.h> |
38 | #include <linux/kernel.h> |
39 | #include <linux/pfn_t.h> |
40 | #include <linux/slab.h> |
41 | #include <linux/mm.h> |
42 | #include <linux/fs.h> |
43 | #include <linux/rbtree.h> |
44 | |
45 | #include <asm/cacheflush.h> |
46 | #include <asm/cacheinfo.h> |
47 | #include <asm/processor.h> |
48 | #include <asm/tlbflush.h> |
49 | #include <asm/x86_init.h> |
50 | #include <asm/fcntl.h> |
51 | #include <asm/e820/api.h> |
52 | #include <asm/mtrr.h> |
53 | #include <asm/page.h> |
54 | #include <asm/msr.h> |
55 | #include <asm/memtype.h> |
56 | #include <asm/io.h> |
57 | |
58 | #include "memtype.h" |
59 | #include "../mm_internal.h" |
60 | |
61 | #undef pr_fmt |
62 | #define pr_fmt(fmt) "" fmt |
63 | |
64 | static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT); |
65 | static u64 __ro_after_init pat_msr_val; |
66 | |
67 | /* |
68 | * PAT support is enabled by default, but can be disabled for |
69 | * various user-requested or hardware-forced reasons: |
70 | */ |
71 | static void __init pat_disable(const char *msg_reason) |
72 | { |
73 | if (pat_disabled) |
74 | return; |
75 | |
76 | pat_disabled = true; |
	pr_info("x86/PAT: %s\n", msg_reason);
78 | |
79 | memory_caching_control &= ~CACHE_PAT; |
80 | } |
81 | |
82 | static int __init nopat(char *str) |
83 | { |
	pat_disable("PAT support disabled via boot option.");
	return 0;
}
early_param("nopat", nopat);
88 | |
89 | bool pat_enabled(void) |
90 | { |
91 | return !pat_disabled; |
92 | } |
93 | EXPORT_SYMBOL_GPL(pat_enabled); |
94 | |
95 | int pat_debug_enable; |
96 | |
97 | static int __init pat_debug_setup(char *str) |
98 | { |
99 | pat_debug_enable = 1; |
100 | return 1; |
101 | } |
__setup("debugpat", pat_debug_setup);
103 | |
104 | #ifdef CONFIG_X86_PAT |
105 | /* |
106 | * X86 PAT uses page flags arch_1 and uncached together to keep track of |
 * the memory type of pages that have a backing struct page.
108 | * |
109 | * X86 PAT supports 4 different memory types: |
110 | * - _PAGE_CACHE_MODE_WB |
111 | * - _PAGE_CACHE_MODE_WC |
112 | * - _PAGE_CACHE_MODE_UC_MINUS |
113 | * - _PAGE_CACHE_MODE_WT |
114 | * |
115 | * _PAGE_CACHE_MODE_WB is the default type. |
116 | */ |
117 | |
118 | #define _PGMT_WB 0 |
119 | #define _PGMT_WC (1UL << PG_arch_1) |
120 | #define _PGMT_UC_MINUS (1UL << PG_uncached) |
121 | #define _PGMT_WT (1UL << PG_uncached | 1UL << PG_arch_1) |
122 | #define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1) |
123 | #define _PGMT_CLEAR_MASK (~_PGMT_MASK) |
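/*
 * I.e. the (PG_uncached, PG_arch_1) page flag pair encodes, in order:
 * (0,0) => WB, (0,1) => WC, (1,0) => UC-, (1,1) => WT.
 */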
124 | |
125 | static inline enum page_cache_mode get_page_memtype(struct page *pg) |
126 | { |
127 | unsigned long pg_flags = pg->flags & _PGMT_MASK; |
128 | |
129 | if (pg_flags == _PGMT_WB) |
130 | return _PAGE_CACHE_MODE_WB; |
131 | else if (pg_flags == _PGMT_WC) |
132 | return _PAGE_CACHE_MODE_WC; |
133 | else if (pg_flags == _PGMT_UC_MINUS) |
134 | return _PAGE_CACHE_MODE_UC_MINUS; |
135 | else |
136 | return _PAGE_CACHE_MODE_WT; |
137 | } |
138 | |
139 | static inline void set_page_memtype(struct page *pg, |
140 | enum page_cache_mode memtype) |
141 | { |
142 | unsigned long memtype_flags; |
143 | unsigned long old_flags; |
144 | unsigned long new_flags; |
145 | |
146 | switch (memtype) { |
147 | case _PAGE_CACHE_MODE_WC: |
148 | memtype_flags = _PGMT_WC; |
149 | break; |
150 | case _PAGE_CACHE_MODE_UC_MINUS: |
151 | memtype_flags = _PGMT_UC_MINUS; |
152 | break; |
153 | case _PAGE_CACHE_MODE_WT: |
154 | memtype_flags = _PGMT_WT; |
155 | break; |
156 | case _PAGE_CACHE_MODE_WB: |
157 | default: |
158 | memtype_flags = _PGMT_WB; |
159 | break; |
160 | } |
161 | |
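	/* Atomically update only the memtype bits, preserving other page flags: */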
162 | old_flags = READ_ONCE(pg->flags); |
163 | do { |
164 | new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; |
165 | } while (!try_cmpxchg(&pg->flags, &old_flags, new_flags)); |
166 | } |
167 | #else |
168 | static inline enum page_cache_mode get_page_memtype(struct page *pg) |
169 | { |
170 | return -1; |
171 | } |
172 | static inline void set_page_memtype(struct page *pg, |
173 | enum page_cache_mode memtype) |
174 | { |
175 | } |
176 | #endif |
177 | |
178 | enum { |
179 | PAT_UC = 0, /* uncached */ |
180 | PAT_WC = 1, /* Write combining */ |
181 | PAT_WT = 4, /* Write Through */ |
182 | PAT_WP = 5, /* Write Protected */ |
183 | PAT_WB = 6, /* Write Back (default) */ |
184 | PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ |
185 | }; |
186 | |
187 | #define CM(c) (_PAGE_CACHE_MODE_ ## c) |
188 | |
189 | static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val, |
190 | char *msg) |
191 | { |
192 | enum page_cache_mode cache; |
193 | char *cache_mode; |
194 | |
195 | switch (pat_val) { |
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
203 | } |
204 | |
205 | memcpy(msg, cache_mode, 4); |
206 | |
207 | return cache; |
208 | } |
209 | |
210 | #undef CM |
211 | |
212 | /* |
213 | * Update the cache mode to pgprot translation tables according to PAT |
214 | * configuration. |
215 | * Using lower indices is preferred, so we start with highest index. |
216 | */ |
217 | static void __init init_cache_modes(u64 pat) |
218 | { |
219 | enum page_cache_mode cache; |
220 | char pat_msg[33]; |
221 | int i; |
222 | |
223 | pat_msg[32] = 0; |
224 | for (i = 7; i >= 0; i--) { |
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
230 | } |
231 | |
232 | void pat_cpu_init(void) |
233 | { |
234 | if (!boot_cpu_has(X86_FEATURE_PAT)) { |
235 | /* |
236 | * If this happens we are on a secondary CPU, but switched to |
237 | * PAT on the boot CPU. We have no way to undo PAT. |
238 | */ |
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat_msr_val);
243 | |
244 | __flush_tlb_all(); |
245 | } |
246 | |
247 | /** |
248 | * pat_bp_init - Initialize the PAT MSR value and PAT table |
249 | * |
250 | * This function initializes PAT MSR value and PAT table with an OS-defined |
251 | * value to enable additional cache attributes, WC, WT and WP. |
252 | * |
253 | * This function prepares the calls of pat_cpu_init() via cache_cpu_init() |
254 | * on all CPUs. |
255 | */ |
256 | void __init pat_bp_init(void) |
257 | { |
258 | struct cpuinfo_x86 *c = &boot_cpu_data; |
259 | #define PAT(p0, p1, p2, p3, p4, p5, p6, p7) \ |
260 | (((u64)PAT_ ## p0) | ((u64)PAT_ ## p1 << 8) | \ |
261 | ((u64)PAT_ ## p2 << 16) | ((u64)PAT_ ## p3 << 24) | \ |
262 | ((u64)PAT_ ## p4 << 32) | ((u64)PAT_ ## p5 << 40) | \ |
263 | ((u64)PAT_ ## p6 << 48) | ((u64)PAT_ ## p7 << 56)) |
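	/*
	 * As a worked example: PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT)
	 * below evaluates to 0x0407050600070106, one byte per PAT slot, with
	 * slot 0 in the lowest byte.
	 */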
264 | |
265 | |
266 | if (!IS_ENABLED(CONFIG_X86_PAT)) |
		pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n");

	if (!cpu_feature_enabled(X86_FEATURE_PAT))
		pat_disable("PAT not supported by the CPU.");
	else
		rdmsrl(MSR_IA32_CR_PAT, pat_msr_val);

	if (!pat_msr_val) {
		pat_disable("PAT support disabled by the firmware.");
276 | |
277 | /* |
278 | * No PAT. Emulate the PAT table that corresponds to the two |
279 | * cache bits, PWT (Write Through) and PCD (Cache Disable). |
280 | * This setup is also the same as the BIOS default setup. |
281 | * |
282 | * PTE encoding: |
283 | * |
284 | * PCD |
285 | * |PWT PAT |
286 | * || slot |
287 | * 00 0 WB : _PAGE_CACHE_MODE_WB |
288 | * 01 1 WT : _PAGE_CACHE_MODE_WT |
289 | * 10 2 UC-: _PAGE_CACHE_MODE_UC_MINUS |
290 | * 11 3 UC : _PAGE_CACHE_MODE_UC |
291 | * |
292 | * NOTE: When WC or WP is used, it is redirected to UC- per |
293 | * the default setup in __cachemode2pte_tbl[]. |
294 | */ |
295 | pat_msr_val = PAT(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC); |
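		/* I.e. pat_msr_val is now 0x0007040600070406. */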
296 | } |
297 | |
298 | /* |
299 | * Xen PV doesn't allow to set PAT MSR, but all cache modes are |
300 | * supported. |
301 | */ |
302 | if (pat_disabled || cpu_feature_enabled(X86_FEATURE_XENPV)) { |
		init_cache_modes(pat_msr_val);
304 | return; |
305 | } |
306 | |
307 | if ((c->x86_vendor == X86_VENDOR_INTEL) && |
308 | (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || |
309 | ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) { |
310 | /* |
311 | * PAT support with the lower four entries. Intel Pentium 2, |
312 | * 3, M, and 4 are affected by PAT errata, which makes the |
313 | * upper four entries unusable. To be on the safe side, we don't |
314 | * use those. |
315 | * |
316 | * PTE encoding: |
317 | * PAT |
318 | * |PCD |
319 | * ||PWT PAT |
320 | * ||| slot |
321 | * 000 0 WB : _PAGE_CACHE_MODE_WB |
322 | * 001 1 WC : _PAGE_CACHE_MODE_WC |
323 | * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS |
324 | * 011 3 UC : _PAGE_CACHE_MODE_UC |
325 | * PAT bit unused |
326 | * |
327 | * NOTE: When WT or WP is used, it is redirected to UC- per |
328 | * the default setup in __cachemode2pte_tbl[]. |
329 | */ |
330 | pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC); |
331 | } else { |
332 | /* |
333 | * Full PAT support. We put WT in slot 7 to improve |
334 | * robustness in the presence of errata that might cause |
335 | * the high PAT bit to be ignored. This way, a buggy slot 7 |
336 | * access will hit slot 3, and slot 3 is UC, so at worst |
337 | * we lose performance without causing a correctness issue. |
338 | * Pentium 4 erratum N46 is an example for such an erratum, |
339 | * although we try not to use PAT at all on affected CPUs. |
340 | * |
341 | * PTE encoding: |
342 | * PAT |
343 | * |PCD |
344 | * ||PWT PAT |
345 | * ||| slot |
346 | * 000 0 WB : _PAGE_CACHE_MODE_WB |
347 | * 001 1 WC : _PAGE_CACHE_MODE_WC |
348 | * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS |
349 | * 011 3 UC : _PAGE_CACHE_MODE_UC |
350 | * 100 4 WB : Reserved |
351 | * 101 5 WP : _PAGE_CACHE_MODE_WP |
352 | * 110 6 UC-: Reserved |
353 | * 111 7 WT : _PAGE_CACHE_MODE_WT |
354 | * |
355 | * The reserved slots are unused, but mapped to their |
356 | * corresponding types in the presence of PAT errata. |
357 | */ |
358 | pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT); |
359 | } |
360 | |
361 | memory_caching_control |= CACHE_PAT; |
362 | |
	init_cache_modes(pat_msr_val);
364 | #undef PAT |
365 | } |
366 | |
367 | static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ |
368 | |
369 | /* |
 * Intersect the PAT memory type with the MTRR memory type and return
 * the resulting memory type as PAT understands it.
 * (The numeric type values used by PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
375 | */ |
376 | static unsigned long pat_x_mtrr_type(u64 start, u64 end, |
377 | enum page_cache_mode req_type) |
378 | { |
379 | /* |
380 | * Look for MTRR hint to get the effective type in case where PAT |
381 | * request is for WB. |
382 | */ |
383 | if (req_type == _PAGE_CACHE_MODE_WB) { |
384 | u8 mtrr_type, uniform; |
385 | |
		mtrr_type = mtrr_type_lookup(start, end, &uniform);
387 | if (mtrr_type != MTRR_TYPE_WRBACK) |
388 | return _PAGE_CACHE_MODE_UC_MINUS; |
389 | |
390 | return _PAGE_CACHE_MODE_WB; |
391 | } |
392 | |
393 | return req_type; |
394 | } |
395 | |
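/*
 * State shared with walk_system_ram_range() below: tracks whether the
 * walked range contained RAM pages, non-RAM holes, or both.
 */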
struct pagerange_state {
	unsigned long	cur_pfn;
	int		ram;
	int		not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
404 | { |
405 | struct pagerange_state *state = arg; |
406 | |
407 | state->not_ram |= initial_pfn > state->cur_pfn; |
408 | state->ram |= total_nr_pages > 0; |
409 | state->cur_pfn = initial_pfn + total_nr_pages; |
410 | |
411 | return state->ram && state->not_ram; |
412 | } |
413 | |
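/*
 * Returns 1 if the whole range is RAM, 0 if it contains no RAM at all, and
 * -1 if it is a (forbidden) mix of RAM and non-RAM pages.
 */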
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
415 | { |
416 | int ret = 0; |
417 | unsigned long start_pfn = start >> PAGE_SHIFT; |
418 | unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; |
419 | struct pagerange_state state = {start_pfn, 0, 0}; |
420 | |
421 | /* |
422 | * For legacy reasons, physical address range in the legacy ISA |
423 | * region is tracked as non-RAM. This will allow users of |
424 | * /dev/mem to map portions of legacy ISA region, even when |
 * some of those portions are listed (or not even listed) with
 * different e820 types (RAM/reserved/..)
427 | */ |
428 | if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT) |
429 | start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT; |
430 | |
431 | if (start_pfn < end_pfn) { |
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
434 | } |
435 | |
436 | return (ret > 0) ? -1 : (state.ram ? 1 : 0); |
437 | } |
438 | |
439 | /* |
440 | * For RAM pages, we use page flags to mark the pages with appropriate type. |
441 | * The page flags are limited to four types, WB (default), WC, WT and UC-. |
442 | * WP request fails with -EINVAL, and UC gets redirected to UC-. Setting |
443 | * a new memory type is only allowed for a page mapped with the default WB |
444 | * type. |
445 | * |
446 | * Here we do two passes: |
447 | * - Find the memtype of all the pages in the range, look for any conflicts. |
448 | * - In case of no conflicts, set the new memtype for pages in the range. |
449 | */ |
450 | static int reserve_ram_pages_type(u64 start, u64 end, |
451 | enum page_cache_mode req_type, |
452 | enum page_cache_mode *new_type) |
453 | { |
454 | struct page *page; |
455 | u64 pfn; |
456 | |
457 | if (req_type == _PAGE_CACHE_MODE_WP) { |
458 | if (new_type) |
459 | *new_type = _PAGE_CACHE_MODE_UC_MINUS; |
460 | return -EINVAL; |
461 | } |
462 | |
463 | if (req_type == _PAGE_CACHE_MODE_UC) { |
464 | /* We do not support strong UC */ |
465 | WARN_ON_ONCE(1); |
466 | req_type = _PAGE_CACHE_MODE_UC_MINUS; |
467 | } |
468 | |
469 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { |
470 | enum page_cache_mode type; |
471 | |
472 | page = pfn_to_page(pfn); |
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
477 | if (new_type) |
478 | *new_type = type; |
479 | |
480 | return -EBUSY; |
481 | } |
482 | } |
483 | |
484 | if (new_type) |
485 | *new_type = req_type; |
486 | |
487 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { |
488 | page = pfn_to_page(pfn); |
		set_page_memtype(page, req_type);
490 | } |
491 | return 0; |
492 | } |
493 | |
494 | static int free_ram_pages_type(u64 start, u64 end) |
495 | { |
496 | struct page *page; |
497 | u64 pfn; |
498 | |
499 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { |
500 | page = pfn_to_page(pfn); |
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
502 | } |
503 | return 0; |
504 | } |
505 | |
506 | static u64 sanitize_phys(u64 address) |
507 | { |
508 | /* |
509 | * When changing the memtype for pages containing poison allow |
510 | * for a "decoy" virtual address (bit 63 clear) passed to |
511 | * set_memory_X(). __pa() on a "decoy" address results in a |
512 | * physical address with bit 63 set. |
513 | * |
514 | * Decoy addresses are not present for 32-bit builds, see |
515 | * set_mce_nospec(). |
516 | */ |
517 | if (IS_ENABLED(CONFIG_X86_64)) |
518 | return address & __PHYSICAL_MASK; |
519 | return address; |
520 | } |
521 | |
522 | /* |
 * req_type typically has one of the following values:
524 | * - _PAGE_CACHE_MODE_WB |
525 | * - _PAGE_CACHE_MODE_WC |
526 | * - _PAGE_CACHE_MODE_UC_MINUS |
527 | * - _PAGE_CACHE_MODE_UC |
528 | * - _PAGE_CACHE_MODE_WT |
529 | * |
 * If new_type is NULL, the function returns an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, the function returns the
 * available type in *new_type on success, and a negative value on any
 * error.
534 | */ |
535 | int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type, |
536 | enum page_cache_mode *new_type) |
537 | { |
538 | struct memtype *entry_new; |
539 | enum page_cache_mode actual_type; |
540 | int is_range_ram; |
541 | int err = 0; |
542 | |
	start = sanitize_phys(start);

	/*
	 * The end address passed into this function is exclusive, but
	 * sanitize_phys() expects an inclusive address.
	 */
	end = sanitize_phys(end - 1) + 1;
	if (start >= end) {
		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
			start, end - 1, cattr_name(req_type));
553 | return -EINVAL; |
554 | } |
555 | |
556 | if (!pat_enabled()) { |
557 | /* This is identical to page table setting without PAT */ |
558 | if (new_type) |
559 | *new_type = req_type; |
560 | return 0; |
561 | } |
562 | |
563 | /* Low ISA region is always mapped WB in page table. No need to track */ |
564 | if (x86_platform.is_untracked_pat_range(start, end)) { |
565 | if (new_type) |
566 | *new_type = _PAGE_CACHE_MODE_WB; |
567 | return 0; |
568 | } |
569 | |
570 | /* |
571 | * Call mtrr_lookup to get the type hint. This is an |
572 | * optimization for /dev/mem mmap'ers into WB memory (BIOS |
573 | * tools and ACPI tools). Use WB request for WB memory and use |
574 | * UC_MINUS otherwise. |
575 | */ |
576 | actual_type = pat_x_mtrr_type(start, end, req_type); |
577 | |
578 | if (new_type) |
579 | *new_type = actual_type; |
580 | |
581 | is_range_ram = pat_pagerange_is_ram(start, end); |
582 | if (is_range_ram == 1) { |
583 | |
584 | err = reserve_ram_pages_type(start, end, req_type, new_type); |
585 | |
586 | return err; |
587 | } else if (is_range_ram < 0) { |
588 | return -EINVAL; |
589 | } |
590 | |
	entry_new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
592 | if (!entry_new) |
593 | return -ENOMEM; |
594 | |
595 | entry_new->start = start; |
596 | entry_new->end = end; |
597 | entry_new->type = actual_type; |
598 | |
	spin_lock(&memtype_lock);

	err = memtype_check_insert(entry_new, new_type);
	if (err) {
		pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(entry_new->type), cattr_name(req_type));
		kfree(entry_new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
615 | start, end - 1, cattr_name(entry_new->type), cattr_name(req_type), |
616 | new_type ? cattr_name(*new_type) : "-" ); |
617 | |
618 | return err; |
619 | } |
620 | |
621 | int memtype_free(u64 start, u64 end) |
622 | { |
623 | int is_range_ram; |
624 | struct memtype *entry_old; |
625 | |
626 | if (!pat_enabled()) |
627 | return 0; |
628 | |
	start = sanitize_phys(start);
	end = sanitize_phys(end);
631 | |
632 | /* Low ISA region is always mapped WB. No need to track */ |
633 | if (x86_platform.is_untracked_pat_range(start, end)) |
634 | return 0; |
635 | |
636 | is_range_ram = pat_pagerange_is_ram(start, end); |
637 | if (is_range_ram == 1) |
638 | return free_ram_pages_type(start, end); |
639 | if (is_range_ram < 0) |
640 | return -EINVAL; |
641 | |
	spin_lock(&memtype_lock);
	entry_old = memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry_old)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry_old);

	dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1);
655 | |
656 | return 0; |
657 | } |
658 | |
659 | |
660 | /** |
661 | * lookup_memtype - Looks up the memory type for a physical address |
662 | * @paddr: physical address of which memory type needs to be looked up |
663 | * |
664 | * Only to be called when PAT is enabled |
665 | * |
666 | * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS |
667 | * or _PAGE_CACHE_MODE_WT. |
668 | */ |
669 | static enum page_cache_mode lookup_memtype(u64 paddr) |
670 | { |
671 | enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB; |
672 | struct memtype *entry; |
673 | |
674 | if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) |
675 | return rettype; |
676 | |
	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
693 | |
694 | return rettype; |
695 | } |
696 | |
697 | /** |
698 | * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type |
699 | * of @pfn cannot be overridden by UC MTRR memory type. |
700 | * |
701 | * Only to be called when PAT is enabled. |
702 | * |
703 | * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC. |
704 | * Returns false in other cases. |
705 | */ |
706 | bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn) |
707 | { |
708 | enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn)); |
709 | |
710 | return cm == _PAGE_CACHE_MODE_UC || |
711 | cm == _PAGE_CACHE_MODE_UC_MINUS || |
712 | cm == _PAGE_CACHE_MODE_WC; |
713 | } |
714 | EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr); |
715 | |
716 | /** |
717 | * memtype_reserve_io - Request a memory type mapping for a region of memory |
718 | * @start: start (physical address) of the region |
719 | * @end: end (physical address) of the region |
720 | * @type: A pointer to memtype, with requested type. On success, requested |
721 | * or any other compatible type that was available for the region is returned |
722 | * |
723 | * On success, returns 0 |
724 | * On failure, returns non-zero |
725 | */ |
726 | int memtype_reserve_io(resource_size_t start, resource_size_t end, |
727 | enum page_cache_mode *type) |
728 | { |
729 | resource_size_t size = end - start; |
730 | enum page_cache_mode req_type = *type; |
731 | enum page_cache_mode new_type; |
732 | int ret; |
733 | |
734 | WARN_ON_ONCE(iomem_map_sanity_check(start, size)); |
735 | |
	ret = memtype_reserve(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (memtype_kernel_map_sync(start, size, new_type) < 0)
744 | goto out_free; |
745 | |
746 | *type = new_type; |
747 | return 0; |
748 | |
749 | out_free: |
750 | memtype_free(start, end); |
751 | ret = -EBUSY; |
752 | out_err: |
753 | return ret; |
754 | } |
755 | |
756 | /** |
757 | * memtype_free_io - Release a memory type mapping for a region of memory |
758 | * @start: start (physical address) of the region |
759 | * @end: end (physical address) of the region |
760 | */ |
761 | void memtype_free_io(resource_size_t start, resource_size_t end) |
762 | { |
763 | memtype_free(start, end); |
764 | } |
765 | |
766 | #ifdef CONFIG_X86_PAT |
767 | int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size) |
768 | { |
769 | enum page_cache_mode type = _PAGE_CACHE_MODE_WC; |
770 | |
	return memtype_reserve_io(start, start + size, &type);
772 | } |
773 | EXPORT_SYMBOL(arch_io_reserve_memtype_wc); |
774 | |
775 | void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size) |
776 | { |
	memtype_free_io(start, start + size);
778 | } |
779 | EXPORT_SYMBOL(arch_io_free_memtype_wc); |
780 | #endif |
781 | |
782 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
783 | unsigned long size, pgprot_t vma_prot) |
784 | { |
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
786 | vma_prot = pgprot_decrypted(vma_prot); |
787 | |
788 | return vma_prot; |
789 | } |
790 | |
791 | #ifdef CONFIG_STRICT_DEVMEM |
792 | /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */ |
793 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
794 | { |
795 | return 1; |
796 | } |
797 | #else |
798 | /* This check is needed to avoid cache aliasing when PAT is enabled */ |
799 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
800 | { |
801 | u64 from = ((u64)pfn) << PAGE_SHIFT; |
802 | u64 to = from + size; |
803 | u64 cursor = from; |
804 | |
805 | if (!pat_enabled()) |
806 | return 1; |
807 | |
808 | while (cursor < to) { |
809 | if (!devmem_is_allowed(pfn)) |
810 | return 0; |
811 | cursor += PAGE_SIZE; |
812 | pfn++; |
813 | } |
814 | return 1; |
815 | } |
816 | #endif /* CONFIG_STRICT_DEVMEM */ |
817 | |
818 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
819 | unsigned long size, pgprot_t *vma_prot) |
820 | { |
821 | enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB; |
822 | |
823 | if (!range_is_allowed(pfn, size)) |
824 | return 0; |
825 | |
826 | if (file->f_flags & O_DSYNC) |
827 | pcm = _PAGE_CACHE_MODE_UC_MINUS; |
828 | |
829 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | |
830 | cachemode2protval(pcm)); |
831 | return 1; |
832 | } |
833 | |
834 | /* |
835 | * Change the memory type for the physical address range in kernel identity |
836 | * mapping space if that range is a part of identity map. |
837 | */ |
838 | int memtype_kernel_map_sync(u64 base, unsigned long size, |
839 | enum page_cache_mode pcm) |
840 | { |
841 | unsigned long id_sz; |
842 | |
843 | if (base > __pa(high_memory-1)) |
844 | return 0; |
845 | |
846 | /* |
847 | * Some areas in the middle of the kernel identity range |
848 | * are not mapped, for example the PCI space. |
849 | */ |
	if (!page_is_ram(base >> PAGE_SHIFT))
851 | return 0; |
852 | |
853 | id_sz = (__pa(high_memory-1) <= base + size) ? |
854 | __pa(high_memory) - base : size; |
855 | |
	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
858 | current->comm, current->pid, |
859 | cattr_name(pcm), |
860 | base, (unsigned long long)(base + size-1)); |
861 | return -EINVAL; |
862 | } |
863 | return 0; |
864 | } |
865 | |
866 | /* |
867 | * Internal interface to reserve a range of physical memory with prot. |
868 | * Reserved non RAM regions only and after successful memtype_reserve, |
869 | * this func also keeps identity mapping (if any) in sync with this new prot. |
870 | */ |
871 | static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, |
872 | int strict_prot) |
873 | { |
874 | int is_ram = 0; |
875 | int ret; |
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
880 | |
881 | /* |
882 | * reserve_pfn_range() for RAM pages. We do not refcount to keep |
883 | * track of number of mappings of RAM pages. We can assert that |
884 | * the type requested matches the type of first page in the range. |
885 | */ |
886 | if (is_ram) { |
887 | if (!pat_enabled()) |
888 | return 0; |
889 | |
890 | pcm = lookup_memtype(paddr); |
891 | if (want_pcm != pcm) { |
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
893 | current->comm, current->pid, |
894 | cattr_name(want_pcm), |
895 | (unsigned long long)paddr, |
896 | (unsigned long long)(paddr + size - 1), |
897 | cattr_name(pcm)); |
898 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & |
899 | (~_PAGE_CACHE_MASK)) | |
900 | cachemode2protval(pcm)); |
901 | } |
902 | return 0; |
903 | } |
904 | |
	ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			memtype_free(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
914 | current->comm, current->pid, |
915 | cattr_name(want_pcm), |
916 | (unsigned long long)paddr, |
917 | (unsigned long long)(paddr + size - 1), |
918 | cattr_name(pcm)); |
919 | return -EINVAL; |
920 | } |
921 | /* |
922 | * We allow returning different type than the one requested in |
923 | * non strict case. |
924 | */ |
925 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & |
926 | (~_PAGE_CACHE_MASK)) | |
927 | cachemode2protval(pcm)); |
928 | } |
929 | |
	if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
		memtype_free(paddr, paddr + size);
932 | return -EINVAL; |
933 | } |
934 | return 0; |
935 | } |
936 | |
937 | /* |
938 | * Internal interface to free a range of physical memory. |
939 | * Frees non RAM regions only. |
940 | */ |
941 | static void free_pfn_range(u64 paddr, unsigned long size) |
942 | { |
943 | int is_ram; |
944 | |
	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		memtype_free(paddr, paddr + size);
948 | } |
949 | |
950 | static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, |
951 | pgprot_t *pgprot) |
952 | { |
953 | unsigned long prot; |
954 | |
955 | VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT)); |
956 | |
957 | /* |
958 | * We need the starting PFN and cachemode used for track_pfn_remap() |
959 | * that covered the whole VMA. For most mappings, we can obtain that |
960 | * information from the page tables. For COW mappings, we might now |
961 | * suddenly have anon folios mapped and follow_phys() will fail. |
962 | * |
963 | * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to |
964 | * detect the PFN. If we need the cachemode as well, we're out of luck |
965 | * for now and have to fail fork(). |
966 | */ |
	if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
		if (pgprot)
			*pgprot = __pgprot(prot);
		return 0;
	}
	if (is_cow_mapping(vma->vm_flags)) {
973 | if (pgprot) |
974 | return -EINVAL; |
975 | *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
976 | return 0; |
977 | } |
978 | WARN_ON_ONCE(1); |
979 | return -EINVAL; |
980 | } |
981 | |
982 | /* |
983 | * track_pfn_copy is called when vma that is covering the pfnmap gets |
984 | * copied through copy_page_range(). |
985 | * |
986 | * If the vma has a linear pfn mapping for the entire range, we get the prot |
987 | * from pte and reserve the entire vma range with single reserve_pfn_range call. |
988 | */ |
989 | int track_pfn_copy(struct vm_area_struct *vma) |
990 | { |
991 | resource_size_t paddr; |
992 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
993 | pgprot_t pgprot; |
994 | |
995 | if (vma->vm_flags & VM_PAT) { |
		if (get_pat_info(vma, &paddr, &pgprot))
			return -EINVAL;
		/* reserve the whole chunk covered by vma. */
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
1000 | } |
1001 | |
1002 | return 0; |
1003 | } |
1004 | |
1005 | /* |
1006 | * prot is passed in as a parameter for the new mapping. If the vma has |
1007 | * a linear pfn mapping for the entire range, or no vma is provided, |
1008 | * reserve the entire pfn + size range with single reserve_pfn_range |
1009 | * call. |
1010 | */ |
1011 | int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, |
1012 | unsigned long pfn, unsigned long addr, unsigned long size) |
1013 | { |
1014 | resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; |
1015 | enum page_cache_mode pcm; |
1016 | |
1017 | /* reserve the whole chunk starting from paddr */ |
1018 | if (!vma || (addr == vma->vm_start |
1019 | && size == (vma->vm_end - vma->vm_start))) { |
1020 | int ret; |
1021 | |
		ret = reserve_pfn_range(paddr, size, prot, 0);
1023 | if (ret == 0 && vma) |
1024 | vm_flags_set(vma, VM_PAT); |
1025 | return ret; |
1026 | } |
1027 | |
1028 | if (!pat_enabled()) |
1029 | return 0; |
1030 | |
1031 | /* |
1032 | * For anything smaller than the vma size we set prot based on the |
1033 | * lookup. |
1034 | */ |
1035 | pcm = lookup_memtype(paddr); |
1036 | |
1037 | /* Check memtype for the remaining pages */ |
1038 | while (size > PAGE_SIZE) { |
1039 | size -= PAGE_SIZE; |
1040 | paddr += PAGE_SIZE; |
1041 | if (pcm != lookup_memtype(paddr)) |
1042 | return -EINVAL; |
1043 | } |
1044 | |
1045 | *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | |
1046 | cachemode2protval(pcm)); |
1047 | |
1048 | return 0; |
1049 | } |
1050 | |
1051 | void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) |
1052 | { |
1053 | enum page_cache_mode pcm; |
1054 | |
1055 | if (!pat_enabled()) |
1056 | return; |
1057 | |
1058 | /* Set prot based on lookup */ |
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
1060 | *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | |
1061 | cachemode2protval(pcm)); |
1062 | } |
1063 | |
1064 | /* |
1065 | * untrack_pfn is called while unmapping a pfnmap for a region. |
1066 | * untrack can be called for a specific region indicated by pfn and size or |
1067 | * can be for the entire vma (in which case pfn, size are zero). |
1068 | */ |
1069 | void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, |
1070 | unsigned long size, bool mm_wr_locked) |
1071 | { |
1072 | resource_size_t paddr; |
1073 | |
1074 | if (vma && !(vma->vm_flags & VM_PAT)) |
1075 | return; |
1076 | |
1077 | /* free the chunk starting from pfn or the whole chunk */ |
1078 | paddr = (resource_size_t)pfn << PAGE_SHIFT; |
1079 | if (!paddr && !size) { |
		if (get_pat_info(vma, &paddr, NULL))
1081 | return; |
1082 | size = vma->vm_end - vma->vm_start; |
1083 | } |
1084 | free_pfn_range(paddr, size); |
1085 | if (vma) { |
1086 | if (mm_wr_locked) |
1087 | vm_flags_clear(vma, VM_PAT); |
1088 | else |
			__vm_flags_mod(vma, 0, VM_PAT);
1090 | } |
1091 | } |
1092 | |
1093 | /* |
1094 | * untrack_pfn_clear is called if the following situation fits: |
1095 | * |
1096 | * 1) while mremapping a pfnmap for a new region, with the old vma after |
1097 | * its pfnmap page table has been removed. The new vma has a new pfnmap |
1098 | * to the same pfn & cache type with VM_PAT set. |
1099 | * 2) while duplicating vm area, the new vma fails to copy the pgtable from |
1100 | * old vma. |
1101 | */ |
1102 | void untrack_pfn_clear(struct vm_area_struct *vma) |
1103 | { |
1104 | vm_flags_clear(vma, VM_PAT); |
1105 | } |
1106 | |
1107 | pgprot_t pgprot_writecombine(pgprot_t prot) |
1108 | { |
1109 | return __pgprot(pgprot_val(prot) | |
1110 | cachemode2protval(_PAGE_CACHE_MODE_WC)); |
1111 | } |
1112 | EXPORT_SYMBOL_GPL(pgprot_writecombine); |
1113 | |
1114 | pgprot_t pgprot_writethrough(pgprot_t prot) |
1115 | { |
1116 | return __pgprot(pgprot_val(prot) | |
1117 | cachemode2protval(_PAGE_CACHE_MODE_WT)); |
1118 | } |
1119 | EXPORT_SYMBOL_GPL(pgprot_writethrough); |
1120 | |
1121 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) |
1122 | |
1123 | /* |
1124 | * We are allocating a temporary printout-entry to be passed |
1125 | * between seq_start()/next() and seq_show(): |
1126 | */ |
1127 | static struct memtype *memtype_get_idx(loff_t pos) |
1128 | { |
1129 | struct memtype *entry_print; |
1130 | int ret; |
1131 | |
	entry_print = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!entry_print)
		return NULL;

	spin_lock(&memtype_lock);
	ret = memtype_copy_nth_element(entry_print, pos);
	spin_unlock(&memtype_lock);

	/* Free it on error: */
	if (ret) {
		kfree(entry_print);
1143 | return NULL; |
1144 | } |
1145 | |
1146 | return entry_print; |
1147 | } |
1148 | |
1149 | static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) |
1150 | { |
1151 | if (*pos == 0) { |
1152 | ++*pos; |
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	kfree(v);
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
1169 | } |
1170 | |
1171 | static int memtype_seq_show(struct seq_file *seq, void *v) |
1172 | { |
1173 | struct memtype *entry_print = (struct memtype *)v; |
1174 | |
	seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n",
			entry_print->start,
			entry_print->end,
			cattr_name(entry_print->type));
1179 | |
1180 | return 0; |
1181 | } |
1182 | |
1183 | static const struct seq_operations memtype_seq_ops = { |
1184 | .start = memtype_seq_start, |
1185 | .next = memtype_seq_next, |
1186 | .stop = memtype_seq_stop, |
1187 | .show = memtype_seq_show, |
1188 | }; |
1189 | |
1190 | static int memtype_seq_open(struct inode *inode, struct file *file) |
1191 | { |
1192 | return seq_open(file, &memtype_seq_ops); |
1193 | } |
1194 | |
1195 | static const struct file_operations memtype_fops = { |
1196 | .open = memtype_seq_open, |
1197 | .read = seq_read, |
1198 | .llseek = seq_lseek, |
1199 | .release = seq_release, |
1200 | }; |
1201 | |
1202 | static int __init pat_memtype_list_init(void) |
1203 | { |
1204 | if (pat_enabled()) { |
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
1207 | } |
1208 | return 0; |
1209 | } |
1210 | late_initcall(pat_memtype_list_init); |
1211 | |
1212 | #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */ |
1213 | |