1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | #include <linux/init.h> |
3 | |
4 | #include <linux/mm.h> |
5 | #include <linux/spinlock.h> |
6 | #include <linux/smp.h> |
7 | #include <linux/interrupt.h> |
8 | #include <linux/export.h> |
9 | #include <linux/cpu.h> |
10 | #include <linux/debugfs.h> |
11 | #include <linux/sched/smt.h> |
12 | #include <linux/task_work.h> |
13 | #include <linux/mmu_notifier.h> |
14 | |
15 | #include <asm/tlbflush.h> |
16 | #include <asm/mmu_context.h> |
17 | #include <asm/nospec-branch.h> |
18 | #include <asm/cache.h> |
19 | #include <asm/cacheflush.h> |
20 | #include <asm/apic.h> |
21 | #include <asm/perf_event.h> |
22 | |
23 | #include "mm_internal.h" |
24 | |
25 | #ifdef CONFIG_PARAVIRT |
26 | # define STATIC_NOPV |
27 | #else |
28 | # define STATIC_NOPV static |
29 | # define __flush_tlb_local native_flush_tlb_local |
30 | # define __flush_tlb_global native_flush_tlb_global |
31 | # define __flush_tlb_one_user(addr) native_flush_tlb_one_user(addr) |
32 | # define __flush_tlb_multi(msk, info) native_flush_tlb_multi(msk, info) |
33 | #endif |
34 | |
35 | /* |
36 | * TLB flushing, formerly SMP-only |
37 | * c/o Linus Torvalds. |
38 | * |
39 | * These mean you can really definitely utterly forget about |
 * writing to user space from interrupts. (It's not allowed anyway.)
41 | * |
42 | * Optimizations Manfred Spraul <manfred@colorfullife.com> |
43 | * |
44 | * More scalable flush, from Andi Kleen |
45 | * |
46 | * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi |
47 | */ |
48 | |
49 | /* |
50 | * Bits to mangle the TIF_SPEC_* state into the mm pointer which is |
 * stored in cpu_tlbstate.last_user_mm_spec.
52 | */ |
53 | #define LAST_USER_MM_IBPB 0x1UL |
54 | #define LAST_USER_MM_L1D_FLUSH 0x2UL |
55 | #define LAST_USER_MM_SPEC_MASK (LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH) |
56 | |
/* Bits to set when the tlbstate and flush state are (re)initialized */
58 | #define LAST_USER_MM_INIT LAST_USER_MM_IBPB |
59 | |
60 | /* |
61 | * The x86 feature is called PCID (Process Context IDentifier). It is similar |
62 | * to what is traditionally called ASID on the RISC processors. |
63 | * |
64 | * We don't use the traditional ASID implementation, where each process/mm gets |
65 | * its own ASID and flush/restart when we run out of ASID space. |
66 | * |
67 | * Instead we have a small per-cpu array of ASIDs and cache the last few mm's |
68 | * that came by on this CPU, allowing cheaper switch_mm between processes on |
69 | * this CPU. |
70 | * |
71 | * We end up with different spaces for different things. To avoid confusion we |
72 | * use different names for each of them: |
73 | * |
74 | * ASID - [0, TLB_NR_DYN_ASIDS-1] |
75 | * the canonical identifier for an mm |
76 | * |
77 | * kPCID - [1, TLB_NR_DYN_ASIDS] |
78 | * the value we write into the PCID part of CR3; corresponds to the |
79 | * ASID+1, because PCID 0 is special. |
80 | * |
81 | * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS] |
82 | * for KPTI each mm has two address spaces and thus needs two |
83 | * PCID values, but we can still do with a single ASID denomination |
84 | * for each mm. Corresponds to kPCID + 2048. |
85 | * |
86 | */ |
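
/*
 * A worked example of the three spaces, assuming TLB_NR_DYN_ASIDS is 6 and
 * X86_CR3_PTI_PCID_USER_BIT is 11 (illustrative values; see <asm/tlbflush.h>
 * and <asm/processor-flags.h> for the real definitions):
 *
 *	ASID 3	->	kern_pcid(3) == 4			(ASID + 1)
 *		->	user_pcid(3) == 4 | (1 << 11) == 2052	(kPCID + 2048)
 *
 * kern_pcid() and user_pcid() below implement exactly this mapping.
 */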
87 | |
88 | /* There are 12 bits of space for ASIDS in CR3 */ |
89 | #define CR3_HW_ASID_BITS 12 |
90 | |
91 | /* |
92 | * When enabled, MITIGATION_PAGE_TABLE_ISOLATION consumes a single bit for |
93 | * user/kernel switches |
94 | */ |
95 | #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION |
96 | # define PTI_CONSUMED_PCID_BITS 1 |
97 | #else |
98 | # define PTI_CONSUMED_PCID_BITS 0 |
99 | #endif |
100 | |
101 | #define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS) |
102 | |
103 | /* |
 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
105 | * for them being zero-based. Another -1 is because PCID 0 is reserved for |
106 | * use by non-PCID-aware users. |
107 | */ |
108 | #define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2) |
109 | |
110 | /* |
111 | * Given @asid, compute kPCID |
112 | */ |
113 | static inline u16 kern_pcid(u16 asid) |
114 | { |
115 | VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); |
116 | |
117 | #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION |
118 | /* |
119 | * Make sure that the dynamic ASID space does not conflict with the |
120 | * bit we are using to switch between user and kernel ASIDs. |
121 | */ |
122 | BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT)); |
123 | |
124 | /* |
125 | * The ASID being passed in here should have respected the |
126 | * MAX_ASID_AVAILABLE and thus never have the switch bit set. |
127 | */ |
128 | VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); |
129 | #endif |
130 | /* |
131 | * The dynamically-assigned ASIDs that get passed in are small |
132 | * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set, |
133 | * so do not bother to clear it. |
134 | * |
135 | * If PCID is on, ASID-aware code paths put the ASID+1 into the |
136 | * PCID bits. This serves two purposes. It prevents a nasty |
137 | * situation in which PCID-unaware code saves CR3, loads some other |
138 | * value (with PCID == 0), and then restores CR3, thus corrupting |
139 | * the TLB for ASID 0 if the saved ASID was nonzero. It also means |
140 | * that any bugs involving loading a PCID-enabled CR3 with |
141 | * CR4.PCIDE off will trigger deterministically. |
142 | */ |
143 | return asid + 1; |
144 | } |
145 | |
146 | /* |
147 | * Given @asid, compute uPCID |
148 | */ |
149 | static inline u16 user_pcid(u16 asid) |
150 | { |
151 | u16 ret = kern_pcid(asid); |
152 | #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION |
153 | ret |= 1 << X86_CR3_PTI_PCID_USER_BIT; |
154 | #endif |
155 | return ret; |
156 | } |
157 | |
158 | static inline unsigned long build_cr3(pgd_t *pgd, u16 asid, unsigned long lam) |
159 | { |
160 | unsigned long cr3 = __sme_pa(pgd) | lam; |
161 | |
162 | if (static_cpu_has(X86_FEATURE_PCID)) { |
163 | VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); |
164 | cr3 |= kern_pcid(asid); |
165 | } else { |
166 | VM_WARN_ON_ONCE(asid != 0); |
167 | } |
168 | |
169 | return cr3; |
170 | } |
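
/*
 * Illustrative sketch of the CR3 value build_cr3() produces, assuming a
 * hypothetical pgd at physical address 0x1234000, ASID 3 and no LAM bits:
 *
 *	__sme_pa(pgd)	= 0x1234000	(physical address, plus the SME C-bit if any)
 *	kern_pcid(3)	= 4		(goes into the low 12 PCID bits of CR3)
 *	cr3		= 0x1234004
 *
 * Without X86_FEATURE_PCID the low 12 bits stay zero and only ASID 0 is valid.
 */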
171 | |
172 | static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid, |
173 | unsigned long lam) |
174 | { |
175 | /* |
176 | * Use boot_cpu_has() instead of this_cpu_has() as this function |
177 | * might be called during early boot. This should work even after |
	 * boot because all CPUs have the same capabilities:
179 | */ |
180 | VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID)); |
181 | return build_cr3(pgd, asid, lam) | CR3_NOFLUSH; |
182 | } |
183 | |
184 | /* |
185 | * We get here when we do something requiring a TLB invalidation |
186 | * but could not go invalidate all of the contexts. We do the |
187 | * necessary invalidation by clearing out the 'ctx_id' which |
188 | * forces a TLB flush when the context is loaded. |
189 | */ |
190 | static void clear_asid_other(void) |
191 | { |
192 | u16 asid; |
193 | |
194 | /* |
195 | * This is only expected to be set if we have disabled |
196 | * kernel _PAGE_GLOBAL pages. |
197 | */ |
198 | if (!static_cpu_has(X86_FEATURE_PTI)) { |
199 | WARN_ON_ONCE(1); |
200 | return; |
201 | } |
202 | |
203 | for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { |
204 | /* Do not need to flush the current asid */ |
205 | if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) |
206 | continue; |
207 | /* |
208 | * Make sure the next time we go to switch to |
209 | * this asid, we do a flush: |
210 | */ |
211 | this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); |
212 | } |
213 | this_cpu_write(cpu_tlbstate.invalidate_other, false); |
214 | } |
215 | |
216 | atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); |
217 | |
218 | |
219 | static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, |
220 | u16 *new_asid, bool *need_flush) |
221 | { |
222 | u16 asid; |
223 | |
224 | if (!static_cpu_has(X86_FEATURE_PCID)) { |
225 | *new_asid = 0; |
226 | *need_flush = true; |
227 | return; |
228 | } |
229 | |
230 | if (this_cpu_read(cpu_tlbstate.invalidate_other)) |
231 | clear_asid_other(); |
232 | |
233 | for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { |
234 | if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != |
235 | next->context.ctx_id) |
236 | continue; |
237 | |
238 | *new_asid = asid; |
239 | *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < |
240 | next_tlb_gen); |
241 | return; |
242 | } |
243 | |
244 | /* |
245 | * We don't currently own an ASID slot on this CPU. |
246 | * Allocate a slot. |
247 | */ |
248 | *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1; |
249 | if (*new_asid >= TLB_NR_DYN_ASIDS) { |
250 | *new_asid = 0; |
251 | this_cpu_write(cpu_tlbstate.next_asid, 1); |
252 | } |
253 | *need_flush = true; |
254 | } |
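
/*
 * Example of the round-robin fallback above (hypothetical numbers, assuming
 * TLB_NR_DYN_ASIDS is 6): if next_asid is currently 6, this_cpu_add_return()
 * yields 7 and the candidate slot 6 is out of range, so the new mm is given
 * ASID 0 and next_asid is reset to 1. Whichever mm previously owned slot 0
 * simply loses it and will reallocate (and flush) the next time it is
 * switched in on this CPU.
 */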
255 | |
256 | /* |
257 | * Given an ASID, flush the corresponding user ASID. We can delay this |
258 | * until the next time we switch to it. |
259 | * |
260 | * See SWITCH_TO_USER_CR3. |
261 | */ |
262 | static inline void invalidate_user_asid(u16 asid) |
263 | { |
264 | /* There is no user ASID if address space separation is off */ |
265 | if (!IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION)) |
266 | return; |
267 | |
268 | /* |
269 | * We only have a single ASID if PCID is off and the CR3 |
270 | * write will have flushed it. |
271 | */ |
272 | if (!cpu_feature_enabled(X86_FEATURE_PCID)) |
273 | return; |
274 | |
275 | if (!static_cpu_has(X86_FEATURE_PTI)) |
276 | return; |
277 | |
278 | __set_bit(kern_pcid(asid), |
279 | (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask)); |
280 | } |
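
/*
 * Sketch of the deferral with hypothetical numbers: invalidate_user_asid(3)
 * sets bit kern_pcid(3) == 4 in this CPU's user_pcid_flush_mask. Nothing is
 * flushed right away; the SWITCH_TO_USER_CR3 assembly in the PTI entry code
 * tests and clears that bit on the next exit to userspace and, if it was set,
 * performs a flushing CR3 write instead of a non-flushing one.
 */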
281 | |
282 | static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, unsigned long lam, |
283 | bool need_flush) |
284 | { |
285 | unsigned long new_mm_cr3; |
286 | |
287 | if (need_flush) { |
		invalidate_user_asid(new_asid);
		new_mm_cr3 = build_cr3(pgdir, new_asid, lam);
	} else {
		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid, lam);
292 | } |
293 | |
294 | /* |
295 | * Caution: many callers of this function expect |
296 | * that load_cr3() is serializing and orders TLB |
297 | * fills with respect to the mm_cpumask writes. |
298 | */ |
	write_cr3(new_mm_cr3);
300 | } |
301 | |
302 | void leave_mm(void) |
303 | { |
304 | struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); |
305 | |
306 | /* |
307 | * It's plausible that we're in lazy TLB mode while our mm is init_mm. |
308 | * If so, our callers still expect us to flush the TLB, but there |
309 | * aren't any user TLB entries in init_mm to worry about. |
310 | * |
311 | * This needs to happen before any other sanity checks due to |
312 | * intel_idle's shenanigans. |
313 | */ |
314 | if (loaded_mm == &init_mm) |
315 | return; |
316 | |
317 | /* Warn if we're not lazy. */ |
318 | WARN_ON(!this_cpu_read(cpu_tlbstate_shared.is_lazy)); |
319 | |
	switch_mm(NULL, &init_mm, NULL);
321 | } |
322 | EXPORT_SYMBOL_GPL(leave_mm); |
323 | |
324 | void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
325 | struct task_struct *tsk) |
326 | { |
327 | unsigned long flags; |
328 | |
329 | local_irq_save(flags); |
330 | switch_mm_irqs_off(NULL, next, tsk); |
331 | local_irq_restore(flags); |
332 | } |
333 | |
334 | /* |
335 | * Invoked from return to user/guest by a task that opted-in to L1D |
336 | * flushing but ended up running on an SMT enabled core due to wrong |
337 | * affinity settings or CPU hotplug. This is part of the paranoid L1D flush |
338 | * contract which this task requested. |
339 | */ |
340 | static void l1d_flush_force_sigbus(struct callback_head *ch) |
341 | { |
342 | force_sig(SIGBUS); |
343 | } |
344 | |
345 | static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm, |
346 | struct task_struct *next) |
347 | { |
348 | /* Flush L1D if the outgoing task requests it */ |
349 | if (prev_mm & LAST_USER_MM_L1D_FLUSH) |
350 | wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); |
351 | |
352 | /* Check whether the incoming task opted in for L1D flush */ |
353 | if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH))) |
354 | return; |
355 | |
356 | /* |
357 | * Validate that it is not running on an SMT sibling as this would |
358 | * make the exercise pointless because the siblings share L1D. If |
	 * it runs on an SMT sibling, notify it with SIGBUS on return to
360 | * user/guest |
361 | */ |
362 | if (this_cpu_read(cpu_info.smt_active)) { |
		clear_ti_thread_flag(&next->thread_info, TIF_SPEC_L1D_FLUSH);
		next->l1d_flush_kill.func = l1d_flush_force_sigbus;
		task_work_add(next, &next->l1d_flush_kill, TWA_RESUME);
366 | } |
367 | } |
368 | |
369 | static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next) |
370 | { |
371 | unsigned long next_tif = read_task_thread_flags(next); |
372 | unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK; |
373 | |
374 | /* |
375 | * Ensure that the bit shift above works as expected and the two flags |
376 | * end up in bit 0 and 1. |
377 | */ |
378 | BUILD_BUG_ON(TIF_SPEC_L1D_FLUSH != TIF_SPEC_IB + 1); |
379 | |
380 | return (unsigned long)next->mm | spec_bits; |
381 | } |
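
/*
 * Worked example: a task with TIF_SPEC_IB set and TIF_SPEC_L1D_FLUSH clear
 * yields spec_bits == 0x1, so the value stored in last_user_mm_spec becomes
 * (next->mm | LAST_USER_MM_IBPB). This only works because mm_struct pointers
 * are aligned well beyond 4 bytes, leaving bits 0 and 1 free for the two
 * flags.
 */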
382 | |
383 | static void cond_mitigation(struct task_struct *next) |
384 | { |
385 | unsigned long prev_mm, next_mm; |
386 | |
387 | if (!next || !next->mm) |
388 | return; |
389 | |
390 | next_mm = mm_mangle_tif_spec_bits(next); |
391 | prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec); |
392 | |
393 | /* |
394 | * Avoid user/user BTB poisoning by flushing the branch predictor |
395 | * when switching between processes. This stops one process from |
396 | * doing Spectre-v2 attacks on another. |
397 | * |
	 * Both the conditional and the always IBPB mode use the mm
399 | * pointer to avoid the IBPB when switching between tasks of the |
400 | * same process. Using the mm pointer instead of mm->context.ctx_id |
401 | * opens a hypothetical hole vs. mm_struct reuse, which is more or |
	 * less impossible to control by an attacker. Aside from that, it
403 | * would only affect the first schedule so the theoretically |
404 | * exposed data is not really interesting. |
405 | */ |
406 | if (static_branch_likely(&switch_mm_cond_ibpb)) { |
407 | /* |
408 | * This is a bit more complex than the always mode because |
409 | * it has to handle two cases: |
410 | * |
411 | * 1) Switch from a user space task (potential attacker) |
412 | * which has TIF_SPEC_IB set to a user space task |
413 | * (potential victim) which has TIF_SPEC_IB not set. |
414 | * |
415 | * 2) Switch from a user space task (potential attacker) |
416 | * which has TIF_SPEC_IB not set to a user space task |
417 | * (potential victim) which has TIF_SPEC_IB set. |
418 | * |
419 | * This could be done by unconditionally issuing IBPB when |
420 | * a task which has TIF_SPEC_IB set is either scheduled in |
421 | * or out. Though that results in two flushes when: |
422 | * |
423 | * - the same user space task is scheduled out and later |
424 | * scheduled in again and only a kernel thread ran in |
425 | * between. |
426 | * |
427 | * - a user space task belonging to the same process is |
428 | * scheduled in after a kernel thread ran in between |
429 | * |
430 | * - a user space task belonging to the same process is |
431 | * scheduled in immediately. |
432 | * |
433 | * Optimize this with reasonably small overhead for the |
434 | * above cases. Mangle the TIF_SPEC_IB bit into the mm |
435 | * pointer of the incoming task which is stored in |
436 | * cpu_tlbstate.last_user_mm_spec for comparison. |
437 | * |
438 | * Issue IBPB only if the mm's are different and one or |
439 | * both have the IBPB bit set. |
440 | */ |
441 | if (next_mm != prev_mm && |
442 | (next_mm | prev_mm) & LAST_USER_MM_IBPB) |
443 | indirect_branch_prediction_barrier(); |
444 | } |
445 | |
446 | if (static_branch_unlikely(&switch_mm_always_ibpb)) { |
447 | /* |
448 | * Only flush when switching to a user space task with a |
449 | * different context than the user space task which ran |
450 | * last on this CPU. |
451 | */ |
452 | if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) != |
453 | (unsigned long)next->mm) |
454 | indirect_branch_prediction_barrier(); |
455 | } |
456 | |
457 | if (static_branch_unlikely(&switch_mm_cond_l1d_flush)) { |
458 | /* |
459 | * Flush L1D when the outgoing task requested it and/or |
460 | * check whether the incoming task requested L1D flushing |
461 | * and ended up on an SMT sibling. |
462 | */ |
463 | if (unlikely((prev_mm | next_mm) & LAST_USER_MM_L1D_FLUSH)) |
464 | l1d_flush_evaluate(prev_mm, next_mm, next); |
465 | } |
466 | |
467 | this_cpu_write(cpu_tlbstate.last_user_mm_spec, next_mm); |
468 | } |
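
/*
 * Condensed view of the conditional-IBPB decision above, with prev_mm and
 * next_mm being the mangled values (illustrative only):
 *
 *	prev_mm			next_mm			IBPB?
 *	taskA_mm | IBPB		taskB_mm		yes (attacker -> victim)
 *	taskA_mm		taskB_mm | IBPB		yes (victim -> attacker)
 *	taskA_mm | IBPB		taskA_mm | IBPB		no  (same mm)
 *	taskA_mm		taskB_mm		no  (neither opted in)
 */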
469 | |
470 | #ifdef CONFIG_PERF_EVENTS |
471 | static inline void cr4_update_pce_mm(struct mm_struct *mm) |
472 | { |
473 | if (static_branch_unlikely(&rdpmc_always_available_key) || |
474 | (!static_branch_unlikely(&rdpmc_never_available_key) && |
	     atomic_read(&mm->context.perf_rdpmc_allowed))) {
476 | /* |
477 | * Clear the existing dirty counters to |
478 | * prevent the leak for an RDPMC task. |
479 | */ |
480 | perf_clear_dirty_counters(); |
481 | cr4_set_bits_irqsoff(X86_CR4_PCE); |
482 | } else |
483 | cr4_clear_bits_irqsoff(X86_CR4_PCE); |
484 | } |
485 | |
486 | void cr4_update_pce(void *ignored) |
487 | { |
488 | cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm)); |
489 | } |
490 | |
491 | #else |
492 | static inline void cr4_update_pce_mm(struct mm_struct *mm) { } |
493 | #endif |
494 | |
495 | /* |
496 | * This optimizes when not actually switching mm's. Some architectures use the |
497 | * 'unused' argument for this optimization, but x86 must use |
498 | * 'cpu_tlbstate.loaded_mm' instead because it does not always keep |
499 | * 'current->active_mm' up to date. |
500 | */ |
501 | void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, |
502 | struct task_struct *tsk) |
503 | { |
504 | struct mm_struct *prev = this_cpu_read(cpu_tlbstate.loaded_mm); |
505 | u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); |
	unsigned long new_lam = mm_lam_cr3_mask(next);
507 | bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy); |
508 | unsigned cpu = smp_processor_id(); |
509 | u64 next_tlb_gen; |
510 | bool need_flush; |
511 | u16 new_asid; |
512 | |
513 | /* We don't want flush_tlb_func() to run concurrently with us. */ |
514 | if (IS_ENABLED(CONFIG_PROVE_LOCKING)) |
515 | WARN_ON_ONCE(!irqs_disabled()); |
516 | |
517 | /* |
518 | * Verify that CR3 is what we think it is. This will catch |
519 | * hypothetical buggy code that directly switches to swapper_pg_dir |
520 | * without going through leave_mm() / switch_mm_irqs_off() or that |
521 | * does something like write_cr3(read_cr3_pa()). |
522 | * |
523 | * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3() |
524 | * isn't free. |
525 | */ |
526 | #ifdef CONFIG_DEBUG_VM |
527 | if (WARN_ON_ONCE(__read_cr3() != build_cr3(prev->pgd, prev_asid, |
528 | tlbstate_lam_cr3_mask()))) { |
529 | /* |
530 | * If we were to BUG here, we'd be very likely to kill |
531 | * the system so hard that we don't see the call trace. |
532 | * Try to recover instead by ignoring the error and doing |
533 | * a global flush to minimize the chance of corruption. |
534 | * |
535 | * (This is far from being a fully correct recovery. |
536 | * Architecturally, the CPU could prefetch something |
537 | * back into an incorrect ASID slot and leave it there |
538 | * to cause trouble down the road. It's better than |
539 | * nothing, though.) |
540 | */ |
541 | __flush_tlb_all(); |
542 | } |
543 | #endif |
544 | if (was_lazy) |
545 | this_cpu_write(cpu_tlbstate_shared.is_lazy, false); |
546 | |
547 | /* |
548 | * The membarrier system call requires a full memory barrier and |
549 | * core serialization before returning to user-space, after |
550 | * storing to rq->curr, when changing mm. This is because |
551 | * membarrier() sends IPIs to all CPUs that are in the target mm |
552 | * to make them issue memory barriers. However, if another CPU |
553 | * switches to/from the target mm concurrently with |
554 | * membarrier(), it can cause that CPU not to receive an IPI |
555 | * when it really should issue a memory barrier. Writing to CR3 |
556 | * provides that full memory barrier and core serializing |
557 | * instruction. |
558 | */ |
559 | if (prev == next) { |
560 | /* Not actually switching mm's */ |
561 | VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != |
562 | next->context.ctx_id); |
563 | |
564 | /* |
565 | * If this races with another thread that enables lam, 'new_lam' |
566 | * might not match tlbstate_lam_cr3_mask(). |
567 | */ |
568 | |
569 | /* |
570 | * Even in lazy TLB mode, the CPU should stay set in the |
571 | * mm_cpumask. The TLB shootdown code can figure out from |
572 | * cpu_tlbstate_shared.is_lazy whether or not to send an IPI. |
573 | */ |
574 | if (WARN_ON_ONCE(prev != &init_mm && |
575 | !cpumask_test_cpu(cpu, mm_cpumask(next)))) |
			cpumask_set_cpu(cpu, mm_cpumask(next));
577 | |
578 | /* |
579 | * If the CPU is not in lazy TLB mode, we are just switching |
580 | * from one thread in a process to another thread in the same |
581 | * process. No TLB flush required. |
582 | */ |
583 | if (!was_lazy) |
584 | return; |
585 | |
586 | /* |
587 | * Read the tlb_gen to check whether a flush is needed. |
588 | * If the TLB is up to date, just use it. |
589 | * The barrier synchronizes with the tlb_gen increment in |
590 | * the TLB shootdown code. |
591 | */ |
592 | smp_mb(); |
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
594 | if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) == |
595 | next_tlb_gen) |
596 | return; |
597 | |
598 | /* |
599 | * TLB contents went out of date while we were in lazy |
600 | * mode. Fall through to the TLB switching code below. |
601 | */ |
602 | new_asid = prev_asid; |
603 | need_flush = true; |
604 | } else { |
605 | /* |
606 | * Apply process to process speculation vulnerability |
607 | * mitigations if applicable. |
608 | */ |
		cond_mitigation(tsk);
610 | |
611 | /* |
612 | * Stop remote flushes for the previous mm. |
613 | * Skip kernel threads; we never send init_mm TLB flushing IPIs, |
614 | * but the bitmap manipulation can cause cache line contention. |
615 | */ |
616 | if (prev != &init_mm) { |
617 | VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, |
618 | mm_cpumask(prev))); |
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
620 | } |
621 | |
622 | /* |
623 | * Start remote flushes and then read tlb_gen. |
624 | */ |
625 | if (next != &init_mm) |
			cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
630 | |
631 | /* Let nmi_uaccess_okay() know that we're changing CR3. */ |
632 | this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); |
633 | barrier(); |
634 | } |
635 | |
636 | set_tlbstate_lam_mode(next); |
637 | if (need_flush) { |
638 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); |
639 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); |
		load_new_mm_cr3(next->pgd, new_asid, new_lam, true);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	} else {
		/* The new ASID is already up to date. */
		load_new_mm_cr3(next->pgd, new_asid, new_lam, false);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
648 | } |
649 | |
650 | /* Make sure we write CR3 before loaded_mm. */ |
651 | barrier(); |
652 | |
653 | this_cpu_write(cpu_tlbstate.loaded_mm, next); |
654 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); |
655 | |
656 | if (next != prev) { |
		cr4_update_pce_mm(next);
658 | switch_ldt(prev, next); |
659 | } |
660 | } |
661 | |
662 | /* |
663 | * Please ignore the name of this function. It should be called |
664 | * switch_to_kernel_thread(). |
665 | * |
666 | * enter_lazy_tlb() is a hint from the scheduler that we are entering a |
667 | * kernel thread or other context without an mm. Acceptable implementations |
668 | * include doing nothing whatsoever, switching to init_mm, or various clever |
669 | * lazy tricks to try to minimize TLB flushes. |
670 | * |
671 | * The scheduler reserves the right to call enter_lazy_tlb() several times |
672 | * in a row. It will notify us that we're going back to a real mm by |
673 | * calling switch_mm_irqs_off(). |
674 | */ |
675 | void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
676 | { |
677 | if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) |
678 | return; |
679 | |
680 | this_cpu_write(cpu_tlbstate_shared.is_lazy, true); |
681 | } |
682 | |
683 | /* |
684 | * Call this when reinitializing a CPU. It fixes the following potential |
685 | * problems: |
686 | * |
687 | * - The ASID changed from what cpu_tlbstate thinks it is (most likely |
688 | * because the CPU was taken down and came back up with CR3's PCID |
 *   bits clear. CPU hotplug can do this.)
690 | * |
691 | * - The TLB contains junk in slots corresponding to inactive ASIDs. |
692 | * |
693 | * - The CPU went so far out to lunch that it may have missed a TLB |
694 | * flush. |
695 | */ |
696 | void initialize_tlbstate_and_flush(void) |
697 | { |
698 | int i; |
699 | struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm); |
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
701 | unsigned long cr3 = __read_cr3(); |
702 | |
703 | /* Assert that CR3 already references the right mm. */ |
704 | WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd)); |
705 | |
706 | /* LAM expected to be disabled */ |
707 | WARN_ON(cr3 & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57)); |
708 | WARN_ON(mm_lam_cr3_mask(mm)); |
709 | |
710 | /* |
711 | * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization |
712 | * doesn't work like other CR4 bits because it can only be set from |
713 | * long mode.) |
714 | */ |
715 | WARN_ON(boot_cpu_has(X86_FEATURE_PCID) && |
716 | !(cr4_read_shadow() & X86_CR4_PCIDE)); |
717 | |
718 | /* Disable LAM, force ASID 0 and force a TLB flush. */ |
	write_cr3(build_cr3(mm->pgd, 0, 0));
720 | |
721 | /* Reinitialize tlbstate. */ |
722 | this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT); |
723 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); |
724 | this_cpu_write(cpu_tlbstate.next_asid, 1); |
725 | this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); |
726 | this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen); |
727 | set_tlbstate_lam_mode(mm); |
728 | |
729 | for (i = 1; i < TLB_NR_DYN_ASIDS; i++) |
730 | this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0); |
731 | } |
732 | |
733 | /* |
734 | * flush_tlb_func()'s memory ordering requirement is that any |
735 | * TLB fills that happen after we flush the TLB are ordered after we |
736 | * read active_mm's tlb_gen. We don't need any explicit barriers |
737 | * because all x86 flush operations are serializing and the |
738 | * atomic64_read operation won't be reordered by the compiler. |
739 | */ |
740 | static void flush_tlb_func(void *info) |
741 | { |
742 | /* |
743 | * We have three different tlb_gen values in here. They are: |
744 | * |
745 | * - mm_tlb_gen: the latest generation. |
746 | * - local_tlb_gen: the generation that this CPU has already caught |
747 | * up to. |
748 | * - f->new_tlb_gen: the generation that the requester of the flush |
749 | * wants us to catch up to. |
750 | */ |
751 | const struct flush_tlb_info *f = info; |
752 | struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); |
753 | u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); |
754 | u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen); |
755 | bool local = smp_processor_id() == f->initiating_cpu; |
756 | unsigned long nr_invalidate = 0; |
757 | u64 mm_tlb_gen; |
758 | |
759 | /* This code cannot presently handle being reentered. */ |
760 | VM_WARN_ON(!irqs_disabled()); |
761 | |
762 | if (!local) { |
763 | inc_irq_stat(irq_tlb_count); |
764 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); |
765 | |
766 | /* Can only happen on remote CPUs */ |
767 | if (f->mm && f->mm != loaded_mm) |
768 | return; |
769 | } |
770 | |
771 | if (unlikely(loaded_mm == &init_mm)) |
772 | return; |
773 | |
774 | VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != |
775 | loaded_mm->context.ctx_id); |
776 | |
777 | if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) { |
778 | /* |
779 | * We're in lazy mode. We need to at least flush our |
780 | * paging-structure cache to avoid speculatively reading |
781 | * garbage into our TLB. Since switching to init_mm is barely |
782 | * slower than a minimal flush, just switch to init_mm. |
783 | * |
784 | * This should be rare, with native_flush_tlb_multi() skipping |
785 | * IPIs to lazy TLB mode CPUs. |
786 | */ |
		switch_mm_irqs_off(NULL, &init_mm, NULL);
788 | return; |
789 | } |
790 | |
791 | if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID && |
792 | f->new_tlb_gen <= local_tlb_gen)) { |
793 | /* |
794 | * The TLB is already up to date in respect to f->new_tlb_gen. |
795 | * While the core might be still behind mm_tlb_gen, checking |
796 | * mm_tlb_gen unnecessarily would have negative caching effects |
797 | * so avoid it. |
798 | */ |
799 | return; |
800 | } |
801 | |
802 | /* |
803 | * Defer mm_tlb_gen reading as long as possible to avoid cache |
804 | * contention. |
805 | */ |
	mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
807 | |
808 | if (unlikely(local_tlb_gen == mm_tlb_gen)) { |
809 | /* |
810 | * There's nothing to do: we're already up to date. This can |
811 | * happen if two concurrent flushes happen -- the first flush to |
812 | * be handled can catch us all the way up, leaving no work for |
813 | * the second flush. |
814 | */ |
815 | goto done; |
816 | } |
817 | |
818 | WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen); |
819 | WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen); |
820 | |
821 | /* |
822 | * If we get to this point, we know that our TLB is out of date. |
823 | * This does not strictly imply that we need to flush (it's |
824 | * possible that f->new_tlb_gen <= local_tlb_gen), but we're |
825 | * going to need to flush in the very near future, so we might |
826 | * as well get it over with. |
827 | * |
828 | * The only question is whether to do a full or partial flush. |
829 | * |
830 | * We do a partial flush if requested and two extra conditions |
831 | * are met: |
832 | * |
833 | * 1. f->new_tlb_gen == local_tlb_gen + 1. We have an invariant that |
834 | * we've always done all needed flushes to catch up to |
835 | * local_tlb_gen. If, for example, local_tlb_gen == 2 and |
836 | * f->new_tlb_gen == 3, then we know that the flush needed to bring |
837 | * us up to date for tlb_gen 3 is the partial flush we're |
838 | * processing. |
839 | * |
840 | * As an example of why this check is needed, suppose that there |
841 | * are two concurrent flushes. The first is a full flush that |
842 | * changes context.tlb_gen from 1 to 2. The second is a partial |
843 | * flush that changes context.tlb_gen from 2 to 3. If they get |
844 | * processed on this CPU in reverse order, we'll see |
845 | * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL. |
846 | * If we were to use __flush_tlb_one_user() and set local_tlb_gen to |
	 * 3, we'd break the invariant: we'd update local_tlb_gen above
848 | * 1 without the full flush that's needed for tlb_gen 2. |
849 | * |
850 | * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization. |
851 | * Partial TLB flushes are not all that much cheaper than full TLB |
852 | * flushes, so it seems unlikely that it would be a performance win |
853 | * to do a partial flush if that won't bring our TLB fully up to |
854 | * date. By doing a full flush instead, we can increase |
855 | * local_tlb_gen all the way to mm_tlb_gen and we can probably |
856 | * avoid another flush in the very near future. |
857 | */ |
858 | if (f->end != TLB_FLUSH_ALL && |
859 | f->new_tlb_gen == local_tlb_gen + 1 && |
860 | f->new_tlb_gen == mm_tlb_gen) { |
861 | /* Partial flush */ |
862 | unsigned long addr = f->start; |
863 | |
864 | /* Partial flush cannot have invalid generations */ |
865 | VM_WARN_ON(f->new_tlb_gen == TLB_GENERATION_INVALID); |
866 | |
867 | /* Partial flush must have valid mm */ |
868 | VM_WARN_ON(f->mm == NULL); |
869 | |
870 | nr_invalidate = (f->end - f->start) >> f->stride_shift; |
871 | |
872 | while (addr < f->end) { |
873 | flush_tlb_one_user(addr); |
874 | addr += 1UL << f->stride_shift; |
875 | } |
876 | if (local) |
877 | count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate); |
878 | } else { |
879 | /* Full flush. */ |
880 | nr_invalidate = TLB_FLUSH_ALL; |
881 | |
882 | flush_tlb_local(); |
883 | if (local) |
884 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); |
885 | } |
886 | |
887 | /* Both paths above update our state to mm_tlb_gen. */ |
888 | this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen); |
889 | |
890 | /* Tracing is done in a unified manner to reduce the code size */ |
891 | done: |
	trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
			(f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
					  TLB_LOCAL_MM_SHOOTDOWN,
			nr_invalidate);
896 | } |
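
/*
 * Summary of the decision above, using hypothetical generation numbers and
 * assuming a ranged request (f->end != TLB_FLUSH_ALL):
 *
 *	local_tlb_gen	f->new_tlb_gen	mm_tlb_gen	action
 *	2		2		3		return early (already done)
 *	2		3		3		partial flush of f->start..f->end
 *	1		3		3		full flush (can't skip gen 2)
 *	2		3		4		full flush (catch up all the way)
 *
 * Either flush path then publishes mm_tlb_gen as this ASID's tlb_gen.
 */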
897 | |
898 | static bool tlb_is_not_lazy(int cpu, void *data) |
899 | { |
900 | return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu); |
901 | } |
902 | |
903 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared); |
904 | EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared); |
905 | |
906 | STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask, |
907 | const struct flush_tlb_info *info) |
908 | { |
909 | /* |
910 | * Do accounting and tracing. Note that there are (and have always been) |
	 * cases in which a remote TLB flush will be traced, but will not
	 * actually be performed.
913 | */ |
914 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); |
915 | if (info->end == TLB_FLUSH_ALL) |
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);
920 | |
921 | /* |
922 | * If no page tables were freed, we can skip sending IPIs to |
923 | * CPUs in lazy TLB mode. They will flush the CPU themselves |
924 | * at the next context switch. |
925 | * |
926 | * However, if page tables are getting freed, we need to send the |
927 | * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping |
928 | * up on the new contents of what used to be page tables, while |
929 | * doing a speculative memory access. |
930 | */ |
931 | if (info->freed_tables) |
		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
	else
		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
				      (void *)info, 1, cpumask);
936 | } |
937 | |
938 | void flush_tlb_multi(const struct cpumask *cpumask, |
939 | const struct flush_tlb_info *info) |
940 | { |
941 | __flush_tlb_multi(cpumask, info); |
942 | } |
943 | |
944 | /* |
945 | * See Documentation/arch/x86/tlb.rst for details. We choose 33 |
946 | * because it is large enough to cover the vast majority (at |
947 | * least 95%) of allocations, and is small enough that we are |
948 | * confident it will not cause too much overhead. Each single |
949 | * flush is about 100 ns, so this caps the maximum overhead at |
950 | * _about_ 3,000 ns. |
951 | * |
952 | * This is in units of pages. |
953 | */ |
954 | unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; |
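
/*
 * In other words (rough arithmetic, not a measured guarantee): a 33-page
 * flush costs about 33 * 100 ns ~= 3.3 us of INVLPG work; anything larger
 * than 33 pages (with 4k pages, anything above ~132 kB) is turned into a
 * single full TLB flush instead.
 */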
955 | |
956 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info); |
957 | |
958 | #ifdef CONFIG_DEBUG_VM |
959 | static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx); |
960 | #endif |
961 | |
962 | static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm, |
963 | unsigned long start, unsigned long end, |
964 | unsigned int stride_shift, bool freed_tables, |
965 | u64 new_tlb_gen) |
966 | { |
967 | struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info); |
968 | |
969 | #ifdef CONFIG_DEBUG_VM |
970 | /* |
971 | * Ensure that the following code is non-reentrant and flush_tlb_info |
972 | * is not overwritten. This means no TLB flushing is initiated by |
973 | * interrupt handlers and machine-check exception handlers. |
974 | */ |
975 | BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1); |
976 | #endif |
977 | |
978 | info->start = start; |
979 | info->end = end; |
980 | info->mm = mm; |
981 | info->stride_shift = stride_shift; |
982 | info->freed_tables = freed_tables; |
983 | info->new_tlb_gen = new_tlb_gen; |
984 | info->initiating_cpu = smp_processor_id(); |
985 | |
986 | return info; |
987 | } |
988 | |
989 | static void put_flush_tlb_info(void) |
990 | { |
991 | #ifdef CONFIG_DEBUG_VM |
992 | /* Complete reentrancy prevention checks */ |
993 | barrier(); |
994 | this_cpu_dec(flush_tlb_info_idx); |
995 | #endif |
996 | } |
997 | |
998 | void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, |
999 | unsigned long end, unsigned int stride_shift, |
1000 | bool freed_tables) |
1001 | { |
1002 | struct flush_tlb_info *info; |
1003 | u64 new_tlb_gen; |
1004 | int cpu; |
1005 | |
1006 | cpu = get_cpu(); |
1007 | |
1008 | /* Should we flush just the requested range? */ |
1009 | if ((end == TLB_FLUSH_ALL) || |
1010 | ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) { |
1011 | start = 0; |
1012 | end = TLB_FLUSH_ALL; |
1013 | } |
1014 | |
1015 | /* This is also a barrier that synchronizes with switch_mm(). */ |
1016 | new_tlb_gen = inc_mm_tlb_gen(mm); |
1017 | |
1018 | info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables, |
1019 | new_tlb_gen); |
1020 | |
1021 | /* |
1022 | * flush_tlb_multi() is not optimized for the common case in which only |
1023 | * a local TLB flush is needed. Optimize this use-case by calling |
	 * flush_tlb_func() directly in this case.
1025 | */ |
	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
		flush_tlb_multi(mm_cpumask(mm), info);
1028 | } else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) { |
1029 | lockdep_assert_irqs_enabled(); |
1030 | local_irq_disable(); |
1031 | flush_tlb_func(info); |
1032 | local_irq_enable(); |
1033 | } |
1034 | |
1035 | put_flush_tlb_info(); |
1036 | put_cpu(); |
1037 | mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); |
1038 | } |
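
/*
 * Typical (hypothetical) call from generic mm code, flushing a single 2M
 * huge page mapping without freeing any page tables:
 *
 *	flush_tlb_mm_range(vma->vm_mm, addr, addr + PMD_SIZE,
 *			   PMD_SHIFT, false);
 *
 * With stride_shift == PMD_SHIFT this counts as one "page" against
 * tlb_single_page_flush_ceiling and results in a single INVLPG-style flush
 * on each CPU currently running the mm.
 */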
1039 | |
1040 | |
1041 | static void do_flush_tlb_all(void *info) |
1042 | { |
1043 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); |
1044 | __flush_tlb_all(); |
1045 | } |
1046 | |
1047 | void flush_tlb_all(void) |
1048 | { |
1049 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); |
	on_each_cpu(do_flush_tlb_all, NULL, 1);
1051 | } |
1052 | |
1053 | static void do_kernel_range_flush(void *info) |
1054 | { |
1055 | struct flush_tlb_info *f = info; |
1056 | unsigned long addr; |
1057 | |
	/* Flush the range one page at a time with INVLPG */
1059 | for (addr = f->start; addr < f->end; addr += PAGE_SIZE) |
1060 | flush_tlb_one_kernel(addr); |
1061 | } |
1062 | |
1063 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
1064 | { |
1065 | /* Balance as user space task's flush, a bit conservative */ |
1066 | if (end == TLB_FLUSH_ALL || |
1067 | (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) { |
		on_each_cpu(do_flush_tlb_all, NULL, 1);
1069 | } else { |
1070 | struct flush_tlb_info *info; |
1071 | |
1072 | preempt_disable(); |
		info = get_flush_tlb_info(NULL, start, end, 0, false,
					  TLB_GENERATION_INVALID);
1075 | |
		on_each_cpu(do_kernel_range_flush, info, 1);
1077 | |
1078 | put_flush_tlb_info(); |
1079 | preempt_enable(); |
1080 | } |
1081 | } |
1082 | |
1083 | /* |
1084 | * This can be used from process context to figure out what the value of |
1085 | * CR3 is without needing to do a (slow) __read_cr3(). |
1086 | * |
1087 | * It's intended to be used for code like KVM that sneakily changes CR3 |
1088 | * and needs to restore it. It needs to be used very carefully. |
1089 | */ |
1090 | unsigned long __get_current_cr3_fast(void) |
1091 | { |
1092 | unsigned long cr3 = |
1093 | build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd, |
1094 | this_cpu_read(cpu_tlbstate.loaded_mm_asid), |
			  tlbstate_lam_cr3_mask());
1096 | |
1097 | /* For now, be very restrictive about when this can be called. */ |
1098 | VM_WARN_ON(in_nmi() || preemptible()); |
1099 | |
1100 | VM_BUG_ON(cr3 != __read_cr3()); |
1101 | return cr3; |
1102 | } |
1103 | EXPORT_SYMBOL_GPL(__get_current_cr3_fast); |
1104 | |
1105 | /* |
1106 | * Flush one page in the kernel mapping |
1107 | */ |
1108 | void flush_tlb_one_kernel(unsigned long addr) |
1109 | { |
1110 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); |
1111 | |
1112 | /* |
1113 | * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its |
1114 | * paravirt equivalent. Even with PCID, this is sufficient: we only |
1115 | * use PCID if we also use global PTEs for the kernel mapping, and |
1116 | * INVLPG flushes global translations across all address spaces. |
1117 | * |
1118 | * If PTI is on, then the kernel is mapped with non-global PTEs, and |
1119 | * __flush_tlb_one_user() will flush the given address for the current |
1120 | * kernel address space and for its usermode counterpart, but it does |
1121 | * not flush it for other address spaces. |
1122 | */ |
1123 | flush_tlb_one_user(addr); |
1124 | |
1125 | if (!static_cpu_has(X86_FEATURE_PTI)) |
1126 | return; |
1127 | |
1128 | /* |
1129 | * See above. We need to propagate the flush to all other address |
1130 | * spaces. In principle, we only need to propagate it to kernelmode |
1131 | * address spaces, but the extra bookkeeping we would need is not |
1132 | * worth it. |
1133 | */ |
1134 | this_cpu_write(cpu_tlbstate.invalidate_other, true); |
1135 | } |
1136 | |
1137 | /* |
1138 | * Flush one page in the user mapping |
1139 | */ |
1140 | STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr) |
1141 | { |
1142 | u32 loaded_mm_asid; |
1143 | bool cpu_pcide; |
1144 | |
1145 | /* Flush 'addr' from the kernel PCID: */ |
1146 | asm volatile("invlpg (%0)" ::"r" (addr) : "memory" ); |
1147 | |
1148 | /* If PTI is off there is no user PCID and nothing to flush. */ |
1149 | if (!static_cpu_has(X86_FEATURE_PTI)) |
1150 | return; |
1151 | |
1152 | loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); |
1153 | cpu_pcide = this_cpu_read(cpu_tlbstate.cr4) & X86_CR4_PCIDE; |
1154 | |
1155 | /* |
1156 | * invpcid_flush_one(pcid>0) will #GP if CR4.PCIDE==0. Check |
1157 | * 'cpu_pcide' to ensure that *this* CPU will not trigger those |
1158 | * #GP's even if called before CR4.PCIDE has been initialized. |
1159 | */ |
1160 | if (boot_cpu_has(X86_FEATURE_INVPCID) && cpu_pcide) |
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
	else
		invalidate_user_asid(loaded_mm_asid);
1164 | } |
1165 | |
1166 | void flush_tlb_one_user(unsigned long addr) |
1167 | { |
1168 | __flush_tlb_one_user(addr); |
1169 | } |
1170 | |
1171 | /* |
1172 | * Flush everything |
1173 | */ |
1174 | STATIC_NOPV void native_flush_tlb_global(void) |
1175 | { |
1176 | unsigned long flags; |
1177 | |
1178 | if (static_cpu_has(X86_FEATURE_INVPCID)) { |
1179 | /* |
1180 | * Using INVPCID is considerably faster than a pair of writes |
1181 | * to CR4 sandwiched inside an IRQ flag save/restore. |
1182 | * |
1183 | * Note, this works with CR4.PCIDE=0 or 1. |
1184 | */ |
1185 | invpcid_flush_all(); |
1186 | return; |
1187 | } |
1188 | |
1189 | /* |
1190 | * Read-modify-write to CR4 - protect it from preemption and |
1191 | * from interrupts. (Use the raw variant because this code can |
1192 | * be called from deep inside debugging code.) |
1193 | */ |
1194 | raw_local_irq_save(flags); |
1195 | |
1196 | __native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4)); |
1197 | |
1198 | raw_local_irq_restore(flags); |
1199 | } |
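
/*
 * The non-INVPCID fallback above relies on __native_tlb_flush_global(),
 * which boils down to roughly this (sketch only, see <asm/tlbflush.h> for
 * the real implementation):
 *
 *	native_write_cr4(cr4 ^ X86_CR4_PGE);	// toggle PGE: flushes everything
 *	native_write_cr4(cr4);			// restore the original CR4
 *
 * Clearing and re-setting CR4.PGE invalidates global translations too,
 * which a plain CR3 write would leave in place.
 */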
1200 | |
1201 | /* |
1202 | * Flush the entire current user mapping |
1203 | */ |
1204 | STATIC_NOPV void native_flush_tlb_local(void) |
1205 | { |
1206 | /* |
1207 | * Preemption or interrupts must be disabled to protect the access |
1208 | * to the per CPU variable and to prevent being preempted between |
1209 | * read_cr3() and write_cr3(). |
1210 | */ |
1211 | WARN_ON_ONCE(preemptible()); |
1212 | |
1213 | invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid)); |
1214 | |
1215 | /* If current->mm == NULL then the read_cr3() "borrows" an mm */ |
	native_write_cr3(__native_read_cr3());
1217 | } |
1218 | |
1219 | void flush_tlb_local(void) |
1220 | { |
1221 | __flush_tlb_local(); |
1222 | } |
1223 | |
1224 | /* |
1225 | * Flush everything |
1226 | */ |
1227 | void __flush_tlb_all(void) |
1228 | { |
1229 | /* |
	 * Catch callers that have preemption enabled even when the PGE feature
	 * is present, in which case the preemption warning in
	 * native_flush_tlb_local() would not trigger.
1232 | */ |
1233 | VM_WARN_ON_ONCE(preemptible()); |
1234 | |
1235 | if (cpu_feature_enabled(X86_FEATURE_PGE)) { |
1236 | __flush_tlb_global(); |
1237 | } else { |
1238 | /* |
1239 | * !PGE -> !PCID (setup_pcid()), thus every flush is total. |
1240 | */ |
1241 | flush_tlb_local(); |
1242 | } |
1243 | } |
1244 | EXPORT_SYMBOL_GPL(__flush_tlb_all); |
1245 | |
1246 | void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) |
1247 | { |
1248 | struct flush_tlb_info *info; |
1249 | |
1250 | int cpu = get_cpu(); |
1251 | |
	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
				  TLB_GENERATION_INVALID);
1254 | /* |
1255 | * flush_tlb_multi() is not optimized for the common case in which only |
1256 | * a local TLB flush is needed. Optimize this use-case by calling |
	 * flush_tlb_func() directly in this case.
1258 | */ |
	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
		flush_tlb_multi(&batch->cpumask, info);
	} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
1262 | lockdep_assert_irqs_enabled(); |
1263 | local_irq_disable(); |
1264 | flush_tlb_func(info); |
1265 | local_irq_enable(); |
1266 | } |
1267 | |
	cpumask_clear(&batch->cpumask);
1269 | |
1270 | put_flush_tlb_info(); |
1271 | put_cpu(); |
1272 | } |
1273 | |
1274 | /* |
1275 | * Blindly accessing user memory from NMI context can be dangerous |
1276 | * if we're in the middle of switching the current user task or |
1277 | * switching the loaded mm. It can also be dangerous if we |
1278 | * interrupted some kernel code that was temporarily using a |
1279 | * different mm. |
1280 | */ |
1281 | bool nmi_uaccess_okay(void) |
1282 | { |
1283 | struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); |
1284 | struct mm_struct *current_mm = current->mm; |
1285 | |
1286 | VM_WARN_ON_ONCE(!loaded_mm); |
1287 | |
1288 | /* |
1289 | * The condition we want to check is |
1290 | * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, |
1291 | * if we're running in a VM with shadow paging, and nmi_uaccess_okay() |
1292 | * is supposed to be reasonably fast. |
1293 | * |
1294 | * Instead, we check the almost equivalent but somewhat conservative |
1295 | * condition below, and we rely on the fact that switch_mm_irqs_off() |
1296 | * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. |
1297 | */ |
1298 | if (loaded_mm != current_mm) |
1299 | return false; |
1300 | |
1301 | VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); |
1302 | |
1303 | return true; |
1304 | } |
1305 | |
1306 | static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, |
1307 | size_t count, loff_t *ppos) |
1308 | { |
1309 | char buf[32]; |
1310 | unsigned int len; |
1311 | |
	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1314 | } |
1315 | |
1316 | static ssize_t tlbflush_write_file(struct file *file, |
1317 | const char __user *user_buf, size_t count, loff_t *ppos) |
1318 | { |
1319 | char buf[32]; |
1320 | ssize_t len; |
1321 | int ceiling; |
1322 | |
1323 | len = min(count, sizeof(buf) - 1); |
	if (copy_from_user(buf, user_buf, len))
1325 | return -EFAULT; |
1326 | |
1327 | buf[len] = '\0'; |
	if (kstrtoint(buf, 0, &ceiling))
1329 | return -EINVAL; |
1330 | |
1331 | if (ceiling < 0) |
1332 | return -EINVAL; |
1333 | |
1334 | tlb_single_page_flush_ceiling = ceiling; |
1335 | return count; |
1336 | } |
1337 | |
1338 | static const struct file_operations fops_tlbflush = { |
1339 | .read = tlbflush_read_file, |
1340 | .write = tlbflush_write_file, |
1341 | .llseek = default_llseek, |
1342 | }; |
1343 | |
1344 | static int __init create_tlb_single_page_flush_ceiling(void) |
1345 | { |
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
1348 | return 0; |
1349 | } |
1350 | late_initcall(create_tlb_single_page_flush_ceiling); |
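
/*
 * The resulting knob can be inspected and tuned at runtime, e.g. (assuming
 * debugfs is mounted at the usual /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * Documentation/arch/x86/tlb.rst discusses how to pick a sensible value.
 */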
1351 | |