// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by, and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper: __GFP_NOTRACK no longer exists in newer kernels */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif

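/*
 * Cloning at the PMD level shares the last-level page tables between
 * the kernel and user copies, which keeps the user page tables small.
 * On 32 bit the kernel sections are not large-page aligned, so cloning
 * whole PMDs there could expose adjacent kernel memory to user space;
 * individual PTEs are cloned instead.
 */
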
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

/* Assume mode is auto unless overridden via cmdline below. */
static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	if (cpu_mitigations_off())
		pti_mode = PTI_FORCE_OFF;
	if (pti_mode == PTI_FORCE_OFF) {
		pti_print_if_insecure("disabled on command line.");
		return;
	}

	if (pti_mode == PTI_FORCE_ON)
		pti_print_if_secure("force enabled on command line.");

	if (pti_mode == PTI_AUTO && !boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;

	setup_force_cpu_cap(X86_FEATURE_PTI);
}

static int __init pti_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		pti_mode = PTI_FORCE_OFF;
	else if (!strcmp(arg, "on"))
		pti_mode = PTI_FORCE_ON;
	else if (!strcmp(arg, "auto"))
		pti_mode = PTI_AUTO;
	else
		return -EINVAL;
	return 0;
}
early_param("pti", pti_parse_cmdline);

static int __init pti_parse_cmdline_nopti(char *arg)
{
	pti_mode = PTI_FORCE_OFF;
	return 0;
}
early_param("nopti", pti_parse_cmdline_nopti);

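/*
 * Example kernel command-line usage of the parameters parsed above:
 *
 *	pti=auto	enable only on CPUs affected by Meltdown (default)
 *	pti=on		force-enable, even on unaffected CPUs
 *	pti=off		force-disable ("nopti" has the same effect)
 */
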
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
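	/*
	 * With PTI the PGD is an order-1 allocation: the kernel copy
	 * occupies the low page and the user copy the high page;
	 * kernel_to_user_pgdp() flips the page-sized address bit to
	 * reach the user copy.
	 */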
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_leaf(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_leaf(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_leaf(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down. Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables. It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_leaf(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,	/* share last-level page tables by copying PMDs */
	PTI_CLONE_PTE,	/* copy individual 4k PTEs */
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
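	/*
	 * Holes at the PUD or PMD level are skipped by rounding 'addr'
	 * up to the next PUD/PMD boundary instead of aborting the walk.
	 */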
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs. This ensures only setting
			 * _PAGE_GLOBAL on present PMDs. This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables. It is effectively
			 * global, so set it as global in both copies. Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported. The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD. That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs one word of scratch space
		 * in which to spill a register. It lives in the sp2 slot
		 * of the CPU's TSS.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table. The
 * espfix fixup for IRET to a 16-bit stack can run with the user CR3
 * already loaded, so this area must be mapped in the user page tables.
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto. Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages. Do the safe thing (disable
	 * global kernel image). This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures. Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables. This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web. But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel. We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you    **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX. These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}