/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm
 * gets its own ASID and we flush and restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still get by with a single ASID
 *         denomination for each mm. Corresponds to kPCID + 2048.
 */
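
/*
 * As a concrete illustration of the numbering above: ASID 3 is programmed
 * into CR3 as kPCID 4 (ASID + 1), and its PTI user counterpart is uPCID
 * 2052 (kPCID + 2048, i.e. kPCID with the high PCID bit set).
 */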

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS 12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches.
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS 1
#else
# define PTI_CONSUMED_PCID_BITS 0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0 -> MAX_ASID_AVAILABLE are valid. -1 below to
 * account for them being zero-based. Another -1 is because PCID 0 is
 * reserved for use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
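
/*
 * Worked out: CR3 has 12 PCID bits. With PAGE_TABLE_ISOLATION one bit is
 * reserved for the user/kernel switch, leaving 11 usable bits, so
 * MAX_ASID_AVAILABLE is (1 << 11) - 2 = 2046; without PTI it is
 * (1 << 12) - 2 = 4094.
 */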

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS 6

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
	/*
	 * The dynamically-assigned ASIDs that get passed in are small
	 * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
	 * so do not bother to clear it.
	 *
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits. This serves two purposes. It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero. It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}
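
/*
 * For example, kern_pcid(0) == 1 and kern_pcid(TLB_NR_DYN_ASIDS - 1) == 6,
 * so the kernel PCIDs actually written to CR3 stay within
 * [1, TLB_NR_DYN_ASIDS], matching the kPCID range described above.
 */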

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
	return ret;
}
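
/*
 * With PTI, user_pcid() just adds X86_CR3_PTI_PCID_USER_BIT (bit 11, i.e.
 * +2048) on top of the kernel PCID, e.g. user_pcid(0) == 2049, matching the
 * uPCID range [2048 + 1, 2048 + TLB_NR_DYN_ASIDS] described above.
 */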

struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(pgd);
	}
}
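
/*
 * The resulting CR3 value is the physical address of the PGD (plus the SME
 * encryption bit, if any), which is 4K-aligned, with the kernel PCID in
 * bits 11:0. As an illustrative example, a PGD at physical 0x1234000 with
 * ASID 3 yields 0x1234000 | kern_pcid(3) == 0x1234004 when PCID is in use.
 */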

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	/*
	 * Use boot_cpu_has() instead of this_cpu_has() as this function
	 * might be called during early boot. This should work even after
	 * boot because all CPUs have the same capabilities:
	 */
	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
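
/*
 * CR3_NOFLUSH is bit 63 of CR3: when PCID is enabled, setting it on a CR3
 * write tells the CPU not to flush the TLB entries tagged with the new
 * PCID, which is what lets switch_mm() reuse a still-valid cached ASID
 * without paying for a full flush.
 */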

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif

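/*
 * One cached TLB context: ctx_id identifies which mm the cached entries
 * belong to (it matches mm->context.ctx_id) and tlb_gen is the flush
 * generation of that mm which this CPU has caught up to. See the ctxs[]
 * description in struct tlb_state below.
 */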
struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on. This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent. This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_ibpb;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm. Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm. loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from. As an invariant, the TLB will never
	 * contain entries that are stale with respect to the tlb_gen
	 * recorded here for that mm.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen. This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code. This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or
 * switching the loaded mm. It can also be dangerous if we
 * interrupted some kernel code that was temporarily using a
 * different mm.
 */
static inline bool nmi_uaccess_okay(void)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	struct mm_struct *current_mm = current->mm;

	VM_WARN_ON_ONCE(!loaded_mm);

	/*
	 * The condition we want to check is
	 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
	 * is supposed to be reasonably fast.
	 *
	 * Instead, we check the almost equivalent but somewhat conservative
	 * condition below, and we rely on the fact that switch_mm_irqs_off()
	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
	 */
	if (loaded_mm != current_mm)
		return false;

	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));

	return true;
}

/* Initialize the CR4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

static inline void __cr4_set(unsigned long cr4)
{
	lockdep_assert_irqs_disabled();
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Set bits in this CPU's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(flags);
	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4)
		__cr4_set(cr4 | mask);
	local_irq_restore(flags);
}

/* Clear bits in this CPU's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(flags);
	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4)
		__cr4_set(cr4 & ~mask);
	local_irq_restore(flags);
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	__cr4_set(cr4 ^ mask);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
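
/*
 * The helpers above keep the per-CPU CR4 shadow and the hardware register in
 * sync, so the current value can be read cheaply via cr4_read_shadow()
 * instead of an actual CR4 access. Typical (illustrative) use:
 * cr4_set_bits(X86_CR4_PGE) to enable global pages and
 * cr4_clear_bits(X86_CR4_PGE) to disable them again.
 */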

/*
 * Mark all other ASIDs as invalid; the current one is preserved.
 */
static inline void invalidate_other_asid(void)
{
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the CR4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot CPU.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * Given an ASID, flush the corresponding user ASID. We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off */
	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * write will have flushed it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
	/*
	 * Preemption or interrupts must be disabled to protect the access
	 * to the per CPU variable and to prevent being preempted between
	 * read_cr3() and write_cr3().
	 */
	WARN_ON_ONCE(preemptible());

	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
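	/*
	 * Rewriting CR3 with its current value flushes all non-global TLB
	 * entries for the current PCID (or the whole non-global TLB if
	 * PCID is disabled); global kernel mappings survive.
	 */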
	native_write_cr3(__native_read_cr3());
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

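	/*
	 * Fall back to toggling CR4.PGE below: clearing and then restoring
	 * PGE invalidates the entire TLB, including global pages.
	 */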
	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* toggle PGE */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_one_user(unsigned long addr)
{
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
	 * Just use invalidate_user_asid() in case we are called early.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
		invalidate_user_asid(loaded_mm_asid);
	else
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
	/*
	 * This is to catch users that have preemption enabled and the PGE
	 * feature, and who would therefore not trigger the warning in
	 * __native_flush_tlb().
	 */
	VM_WARN_ON_ONCE(preemptible());

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		__flush_tlb();
	}
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one_kernel(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);

	/*
	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
	 * paravirt equivalent. Even with PCID, this is sufficient: we only
	 * use PCID if we also use global PTEs for the kernel mapping, and
	 * INVLPG flushes global translations across all address spaces.
	 *
	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
	 * __flush_tlb_one_user() will flush the given address for the current
	 * kernel address space and for its usermode counterpart, but it does
	 * not flush it for other address spaces.
	 */
	__flush_tlb_one_user(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * See above. We need to propagate the flush to all other address
	 * spaces. In principle, we only need to propagate it to kernelmode
	 * address spaces, but the extra bookkeeping we would need is not
	 * worth it.
	 */
	invalidate_other_asid();
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
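
/*
 * Illustrative example of the common entry points: flushing a single page
 * of an mm boils down to
 *
 *	flush_tlb_mm_range(vma->vm_mm, addr, addr + PAGE_SIZE, PAGE_SHIFT, false);
 *
 * as flush_tlb_page() below does, while flush_tlb_mm() passes TLB_FLUSH_ALL
 * to flush the whole address space.
 */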
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm. .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm. .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated. .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
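	/*
	 * stride_shift gives the flush granularity: PAGE_SHIFT for ordinary
	 * pages, or the huge page shift for hugetlb ranges (see
	 * flush_tlb_range()). freed_tables is set when page-table pages were
	 * freed as part of the operation, so stale paging-structure entries
	 * must not survive the flush either.
	 */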
	unsigned int		stride_shift;
	bool			freed_tables;
};

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count. This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)

#define paravirt_tlb_remove_table(tlb, page) \
	tlb_remove_page(tlb, (void *)(page))
#endif

#endif /* _ASM_X86_TLBFLUSH_H */