1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 1994 Linus Torvalds |
4 | * |
5 | * Pentium III FXSR, SSE support |
6 | * General FPU state handling cleanups |
7 | * Gareth Hughes <gareth@valinux.com>, May 2000 |
8 | */ |
9 | #include <asm/fpu/api.h> |
10 | #include <asm/fpu/regset.h> |
11 | #include <asm/fpu/sched.h> |
12 | #include <asm/fpu/signal.h> |
13 | #include <asm/fpu/types.h> |
14 | #include <asm/traps.h> |
15 | #include <asm/irq_regs.h> |
16 | |
17 | #include <uapi/asm/kvm.h> |
18 | |
19 | #include <linux/hardirq.h> |
20 | #include <linux/pkeys.h> |
21 | #include <linux/vmalloc.h> |
22 | |
23 | #include "context.h" |
24 | #include "internal.h" |
25 | #include "legacy.h" |
26 | #include "xstate.h" |
27 | |
28 | #define CREATE_TRACE_POINTS |
29 | #include <asm/trace/fpu.h> |
30 | |
31 | #ifdef CONFIG_X86_64 |
32 | DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic); |
33 | DEFINE_PER_CPU(u64, xfd_state); |
34 | #endif |
35 | |
36 | /* The FPU state configuration data for kernel and user space */ |
37 | struct fpu_state_config fpu_kernel_cfg __ro_after_init; |
38 | struct fpu_state_config fpu_user_cfg __ro_after_init; |
39 | |
40 | /* |
41 | * Represents the initial FPU state. It's mostly (but not completely) zeroes, |
42 | * depending on the FPU hardware format: |
43 | */ |
44 | struct fpstate init_fpstate __ro_after_init; |
45 | |
46 | /* Track in-kernel FPU usage */ |
47 | static DEFINE_PER_CPU(bool, in_kernel_fpu); |
48 | |
49 | /* |
50 | * Track which context is using the FPU on the CPU: |
51 | */ |
52 | DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); |
53 | |
54 | /* |
55 | * Can we use the FPU in kernel mode with the |
56 | * whole "kernel_fpu_begin/end()" sequence? |
57 | */ |
58 | bool irq_fpu_usable(void) |
59 | { |
60 | if (WARN_ON_ONCE(in_nmi())) |
61 | return false; |
62 | |
	/* In-kernel FPU usage already active? */
64 | if (this_cpu_read(in_kernel_fpu)) |
65 | return false; |
66 | |
67 | /* |
68 | * When not in NMI or hard interrupt context, FPU can be used in: |
69 | * |
70 | * - Task context except from within fpregs_lock()'ed critical |
71 | * regions. |
72 | * |
73 | * - Soft interrupt processing context which cannot happen |
74 | * while in a fpregs_lock()'ed critical region. |
75 | */ |
76 | if (!in_hardirq()) |
77 | return true; |
78 | |
79 | /* |
80 | * In hard interrupt context it's safe when soft interrupts |
81 | * are enabled, which means the interrupt did not hit in |
82 | * a fpregs_lock()'ed critical region. |
83 | */ |
84 | return !softirq_count(); |
85 | } |
86 | EXPORT_SYMBOL(irq_fpu_usable); |
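
/*
 * A typical calling pattern (a sketch, not code from this file): callers
 * which may run in atypical contexts check irq_fpu_usable() before starting
 * an in-kernel FPU section and fall back to scalar code otherwise:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... use SIMD/FPU registers ...
 *		kernel_fpu_end();
 *	} else {
 *		... scalar fallback ...
 *	}
 */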
87 | |
88 | /* |
89 | * Track AVX512 state use because it is known to slow the max clock |
90 | * speed of the core. |
91 | */ |
92 | static void update_avx_timestamp(struct fpu *fpu) |
93 | { |
94 | |
95 | #define AVX512_TRACKING_MASK (XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM) |
96 | |
97 | if (fpu->fpstate->regs.xsave.header.xfeatures & AVX512_TRACKING_MASK) |
98 | fpu->avx512_timestamp = jiffies; |
99 | } |
100 | |
101 | /* |
102 | * Save the FPU register state in fpu->fpstate->regs. The register state is |
103 | * preserved. |
104 | * |
105 | * Must be called with fpregs_lock() held. |
106 | * |
107 | * The legacy FNSAVE instruction clears all FPU state unconditionally, so |
108 | * register state has to be reloaded. That might be a pointless exercise |
109 | * when the FPU is going to be used by another task right after that. But |
 * this only affects 20+ year old 32-bit systems and avoids conditionals all
111 | * over the place. |
112 | * |
113 | * FXSAVE and all XSAVE variants preserve the FPU register state. |
114 | */ |
115 | void save_fpregs_to_fpstate(struct fpu *fpu) |
116 | { |
117 | if (likely(use_xsave())) { |
		os_xsave(fpu->fpstate);
119 | update_avx_timestamp(fpu); |
120 | return; |
121 | } |
122 | |
123 | if (likely(use_fxsr())) { |
		fxsave(&fpu->fpstate->regs.fxsave);
125 | return; |
126 | } |
127 | |
128 | /* |
129 | * Legacy FPU register saving, FNSAVE always clears FPU registers, |
130 | * so we have to reload them from the memory state. |
131 | */ |
132 | asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave)); |
	frstor(&fpu->fpstate->regs.fsave);
134 | } |
135 | |
136 | void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask) |
137 | { |
138 | /* |
139 | * AMD K7/K8 and later CPUs up to Zen don't save/restore |
140 | * FDP/FIP/FOP unless an exception is pending. Clear the x87 state |
141 | * here by setting it to fixed values. "m" is a random variable |
142 | * that should be in L1. |
143 | */ |
144 | if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) { |
145 | asm volatile( |
146 | "fnclex\n\t" |
147 | "emms\n\t" |
148 | "fildl %P[addr]" /* set F?P to defined value */ |
149 | : : [addr] "m" (fpstate)); |
150 | } |
151 | |
152 | if (use_xsave()) { |
153 | /* |
		 * Dynamically enabled features are enabled in XCR0, but
		 * using them also requires the corresponding bits in XFD
		 * to be cleared. If the bits are set, a related instruction
		 * will raise #NM. This allows the larger FPU buffer to be
		 * allocated lazily from the #NM handler, or the task to be
		 * killed if it has no permission, which would otherwise
		 * happen via #UD if the feature were disabled in XCR0.
		 *
		 * XFD state follows the same lifetime rules as XSTATE. To
		 * restore state correctly, XFD has to be updated before
		 * XRSTORS, otherwise the component would stay in or go
		 * into init state even if the bits are set in
		 * fpstate::regs::xsave::xfeatures.
167 | */ |
168 | xfd_update_state(fpstate); |
169 | |
170 | /* |
171 | * Restoring state always needs to modify all features |
172 | * which are in @mask even if the current task cannot use |
173 | * extended features. |
174 | * |
175 | * So fpstate->xfeatures cannot be used here, because then |
176 | * a feature for which the task has no permission but was |
177 | * used by the previous task would not go into init state. |
178 | */ |
179 | mask = fpu_kernel_cfg.max_features & mask; |
180 | |
181 | os_xrstor(fpstate, mask); |
182 | } else { |
		if (use_fxsr())
			fxrstor(&fpstate->regs.fxsave);
		else
			frstor(&fpstate->regs.fsave);
187 | } |
188 | } |
189 | |
190 | void fpu_reset_from_exception_fixup(void) |
191 | { |
	restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE);
193 | } |
194 | |
195 | #if IS_ENABLED(CONFIG_KVM) |
196 | static void __fpstate_reset(struct fpstate *fpstate, u64 xfd); |
197 | |
198 | static void fpu_init_guest_permissions(struct fpu_guest *gfpu) |
199 | { |
200 | struct fpu_state_perm *fpuperm; |
201 | u64 perm; |
202 | |
203 | if (!IS_ENABLED(CONFIG_X86_64)) |
204 | return; |
205 | |
	spin_lock_irq(&current->sighand->siglock);
	fpuperm = &current->group_leader->thread.fpu.guest_perm;
208 | perm = fpuperm->__state_perm; |
209 | |
210 | /* First fpstate allocation locks down permissions. */ |
211 | WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED); |
212 | |
	spin_unlock_irq(&current->sighand->siglock);
214 | |
215 | gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED; |
216 | } |
217 | |
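/*
 * fpu_alloc_guest_fpstate - Allocate and initialize a guest fpstate buffer
 * @gfpu:	Pointer to the guest FPU container
 *
 * Allocates a zeroed fpstate buffer sized for the host's default features,
 * marks it as vmalloc'ed guest state and locks down the dynamic feature
 * permissions of the VMM process.
 *
 * Return: true on success, false on allocation failure.
 */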
218 | bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) |
219 | { |
220 | struct fpstate *fpstate; |
221 | unsigned int size; |
222 | |
223 | size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); |
224 | fpstate = vzalloc(size); |
225 | if (!fpstate) |
226 | return false; |
227 | |
	/* Leave xfd at 0 (the reset value defined by the spec) */
	__fpstate_reset(fpstate, 0);
230 | fpstate_init_user(fpstate); |
231 | fpstate->is_valloc = true; |
232 | fpstate->is_guest = true; |
233 | |
234 | gfpu->fpstate = fpstate; |
235 | gfpu->xfeatures = fpu_user_cfg.default_features; |
236 | gfpu->perm = fpu_user_cfg.default_features; |
237 | |
238 | /* |
239 | * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state |
240 | * to userspace, even when XSAVE is unsupported, so that restoring FPU |
241 | * state on a different CPU that does support XSAVE can cleanly load |
242 | * the incoming state using its natural XSAVE. In other words, KVM's |
243 | * uABI size may be larger than this host's default size. Conversely, |
244 | * the default size should never be larger than KVM's base uABI size; |
245 | * all features that can expand the uABI size must be opt-in. |
246 | */ |
247 | gfpu->uabi_size = sizeof(struct kvm_xsave); |
248 | if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size)) |
249 | gfpu->uabi_size = fpu_user_cfg.default_size; |
250 | |
251 | fpu_init_guest_permissions(gfpu); |
252 | |
253 | return true; |
254 | } |
255 | EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate); |
256 | |
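/*
 * fpu_free_guest_fpstate - Free a guest fpstate allocated by
 *			    fpu_alloc_guest_fpstate()
 * @gfpu:	Pointer to the guest FPU container
 *
 * The fpstate must not be in use by a vCPU when this is invoked.
 */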
257 | void fpu_free_guest_fpstate(struct fpu_guest *gfpu) |
258 | { |
259 | struct fpstate *fps = gfpu->fpstate; |
260 | |
261 | if (!fps) |
262 | return; |
263 | |
264 | if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use)) |
265 | return; |
266 | |
267 | gfpu->fpstate = NULL; |
	vfree(fps);
269 | } |
270 | EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate); |
271 | |
272 | /* |
273 | * fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable |
274 | * @guest_fpu: Pointer to the guest FPU container |
275 | * @xfeatures: Features requested by guest CPUID |
276 | * |
277 | * Enable all dynamic xfeatures according to guest perm and requested CPUID. |
278 | * |
279 | * Return: 0 on success, error code otherwise |
280 | */ |
281 | int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures) |
282 | { |
283 | lockdep_assert_preemption_enabled(); |
284 | |
285 | /* Nothing to do if all requested features are already enabled. */ |
286 | xfeatures &= ~guest_fpu->xfeatures; |
287 | if (!xfeatures) |
288 | return 0; |
289 | |
	return __xfd_enable_feature(xfeatures, guest_fpu);
291 | } |
292 | EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features); |
293 | |
294 | #ifdef CONFIG_X86_64 |
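/*
 * fpu_update_guest_xfd - Update the guest's XFD state
 * @guest_fpu:	Pointer to the guest FPU container
 * @xfd:	New XFD value written by the guest
 *
 * Updates the software XFD state and, if the guest fpstate is currently
 * in use, the XFD MSR as well.
 */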
295 | void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) |
296 | { |
297 | fpregs_lock(); |
298 | guest_fpu->fpstate->xfd = xfd; |
299 | if (guest_fpu->fpstate->in_use) |
		xfd_update_state(guest_fpu->fpstate);
301 | fpregs_unlock(); |
302 | } |
303 | EXPORT_SYMBOL_GPL(fpu_update_guest_xfd); |
304 | |
305 | /** |
306 | * fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state |
307 | * |
308 | * Must be invoked from KVM after a VMEXIT before enabling interrupts when |
309 | * XFD write emulation is disabled. This is required because the guest can |
310 | * freely modify XFD and the state at VMEXIT is not guaranteed to be the |
 * same as the state on VMENTER. So software state has to be updated before
312 | * any operation which depends on it can take place. |
313 | * |
 * Note: It can be invoked unconditionally even when write emulation is
 * enabled, at the cost of a pointless MSR read in that case.
316 | */ |
317 | void fpu_sync_guest_vmexit_xfd_state(void) |
318 | { |
319 | struct fpstate *fps = current->thread.fpu.fpstate; |
320 | |
321 | lockdep_assert_irqs_disabled(); |
322 | if (fpu_state_size_dynamic()) { |
323 | rdmsrl(MSR_IA32_XFD, fps->xfd); |
324 | __this_cpu_write(xfd_state, fps->xfd); |
325 | } |
326 | } |
327 | EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state); |
328 | #endif /* CONFIG_X86_64 */ |
329 | |
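/*
 * fpu_swap_kvm_fpstate - Swap the task and guest fpstates on VM entry/exit
 * @guest_fpu:		Pointer to the guest FPU container
 * @enter_guest:	True when entering the guest, false when leaving it
 *
 * Saves the currently loaded register state, swaps fpu->fpstate between
 * the task and the guest fpstate and loads the new one, including the
 * required XFD update.
 */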
330 | int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) |
331 | { |
332 | struct fpstate *guest_fps = guest_fpu->fpstate; |
	struct fpu *fpu = &current->thread.fpu;
334 | struct fpstate *cur_fps = fpu->fpstate; |
335 | |
336 | fpregs_lock(); |
337 | if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD)) |
338 | save_fpregs_to_fpstate(fpu); |
339 | |
340 | /* Swap fpstate */ |
341 | if (enter_guest) { |
342 | fpu->__task_fpstate = cur_fps; |
343 | fpu->fpstate = guest_fps; |
344 | guest_fps->in_use = true; |
345 | } else { |
346 | guest_fps->in_use = false; |
347 | fpu->fpstate = fpu->__task_fpstate; |
348 | fpu->__task_fpstate = NULL; |
349 | } |
350 | |
351 | cur_fps = fpu->fpstate; |
352 | |
353 | if (!cur_fps->is_confidential) { |
354 | /* Includes XFD update */ |
		restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);
356 | } else { |
357 | /* |
358 | * XSTATE is restored by firmware from encrypted |
359 | * memory. Make sure XFD state is correct while |
		 * running with guest fpstate.
		 */
		xfd_update_state(cur_fps);
363 | } |
364 | |
365 | fpregs_mark_activate(); |
366 | fpregs_unlock(); |
367 | return 0; |
368 | } |
369 | EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate); |
370 | |
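/*
 * fpu_copy_guest_fpstate_to_uabi - Copy guest FPU state into a KVM uABI buffer
 * @gfpu:	Pointer to the guest FPU container
 * @buf:	Pointer to the destination uABI buffer
 * @size:	Size of the buffer
 * @xfeatures:	Feature set to include in the copy
 * @pkru:	PKRU value to place in the buffer
 */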
371 | void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, |
372 | unsigned int size, u64 xfeatures, u32 pkru) |
373 | { |
374 | struct fpstate *kstate = gfpu->fpstate; |
375 | union fpregs_state *ustate = buf; |
376 | struct membuf mb = { .p = buf, .left = size }; |
377 | |
378 | if (cpu_feature_enabled(X86_FEATURE_XSAVE)) { |
		__copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
					  XSTATE_COPY_XSAVE);
381 | } else { |
382 | memcpy(&ustate->fxsave, &kstate->regs.fxsave, |
383 | sizeof(ustate->fxsave)); |
		/* Make it restorable on an XSAVE enabled host */
385 | ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE; |
386 | } |
387 | } |
388 | EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi); |
389 | |
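/*
 * fpu_copy_uabi_to_guest_fpstate - Copy a KVM uABI buffer into guest state
 * @gfpu:	Pointer to the guest FPU container
 * @buf:	Pointer to the source uABI buffer
 * @xcr0:	Guest XCR0 which limits the permissible xfeatures
 * @vpkru:	Pointer to the guest PKRU value, updated when the buffer
 *		contains PKRU state
 *
 * Return: 0 on success, -EINVAL when the buffer contains invalid state.
 */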
390 | int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, |
391 | u64 xcr0, u32 *vpkru) |
392 | { |
393 | struct fpstate *kstate = gfpu->fpstate; |
394 | const union fpregs_state *ustate = buf; |
395 | |
396 | if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) { |
397 | if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE) |
398 | return -EINVAL; |
399 | if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask) |
400 | return -EINVAL; |
401 | memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave)); |
402 | return 0; |
403 | } |
404 | |
405 | if (ustate->xsave.header.xfeatures & ~xcr0) |
406 | return -EINVAL; |
407 | |
408 | /* |
409 | * Nullify @vpkru to preserve its current value if PKRU's bit isn't set |
410 | * in the header. KVM's odd ABI is to leave PKRU untouched in this |
411 | * case (all other components are eventually re-initialized). |
412 | */ |
413 | if (!(ustate->xsave.header.xfeatures & XFEATURE_MASK_PKRU)) |
414 | vpkru = NULL; |
415 | |
	return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
417 | } |
418 | EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate); |
419 | #endif /* CONFIG_KVM */ |
420 | |
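/*
 * kernel_fpu_begin_mask - Begin an in-kernel FPU section
 * @kfpu_mask:	KFPU_MXCSR and/or KFPU_387, selects which control registers
 *		get sane initial values
 *
 * Most callers use the kernel_fpu_begin() wrapper in <asm/fpu/api.h>,
 * which selects a mask appropriate for the build.
 */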
421 | void kernel_fpu_begin_mask(unsigned int kfpu_mask) |
422 | { |
423 | preempt_disable(); |
424 | |
425 | WARN_ON_FPU(!irq_fpu_usable()); |
426 | WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); |
427 | |
428 | this_cpu_write(in_kernel_fpu, true); |
429 | |
430 | if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && |
431 | !test_thread_flag(TIF_NEED_FPU_LOAD)) { |
432 | set_thread_flag(TIF_NEED_FPU_LOAD); |
		save_fpregs_to_fpstate(&current->thread.fpu);
434 | } |
435 | __cpu_invalidate_fpregs_state(); |
436 | |
437 | /* Put sane initial values into the control registers. */ |
438 | if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM)) |
439 | ldmxcsr(MXCSR_DEFAULT); |
440 | |
441 | if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU)) |
442 | asm volatile ("fninit" ); |
443 | } |
444 | EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); |
445 | |
446 | void kernel_fpu_end(void) |
447 | { |
448 | WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); |
449 | |
450 | this_cpu_write(in_kernel_fpu, false); |
451 | preempt_enable(); |
452 | } |
453 | EXPORT_SYMBOL_GPL(kernel_fpu_end); |
454 | |
455 | /* |
456 | * Sync the FPU register state to current's memory register state when the |
457 | * current task owns the FPU. The hardware register state is preserved. |
458 | */ |
459 | void fpu_sync_fpstate(struct fpu *fpu) |
460 | { |
	WARN_ON_FPU(fpu != &current->thread.fpu);
462 | |
463 | fpregs_lock(); |
464 | trace_x86_fpu_before_save(fpu); |
465 | |
466 | if (!test_thread_flag(TIF_NEED_FPU_LOAD)) |
467 | save_fpregs_to_fpstate(fpu); |
468 | |
469 | trace_x86_fpu_after_save(fpu); |
470 | fpregs_unlock(); |
471 | } |
472 | |
473 | static inline unsigned int init_fpstate_copy_size(void) |
474 | { |
475 | if (!use_xsave()) |
476 | return fpu_kernel_cfg.default_size; |
477 | |
478 | /* XSAVE(S) just needs the legacy and the xstate header part */ |
479 | return sizeof(init_fpstate.regs.xsave); |
480 | } |
481 | |
482 | static inline void fpstate_init_fxstate(struct fpstate *fpstate) |
483 | { |
	fpstate->regs.fxsave.cwd = 0x37f;		/* All x87 exceptions masked */
	fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT;	/* All SSE exceptions masked */
486 | } |
487 | |
488 | /* |
489 | * Legacy x87 fpstate state init: |
490 | */ |
491 | static inline void fpstate_init_fstate(struct fpstate *fpstate) |
492 | { |
	fpstate->regs.fsave.cwd = 0xffff037fu;	/* Default control word: all exceptions masked */
	fpstate->regs.fsave.swd = 0xffff0000u;	/* Status word: clear */
	fpstate->regs.fsave.twd = 0xffffffffu;	/* Tag word: all registers empty */
	fpstate->regs.fsave.fos = 0xffff0000u;	/* FPU operand selector */
497 | } |
498 | |
499 | /* |
500 | * Used in two places: |
501 | * 1) Early boot to setup init_fpstate for non XSAVE systems |
502 | * 2) fpu_init_fpstate_user() which is invoked from KVM |
503 | */ |
504 | void fpstate_init_user(struct fpstate *fpstate) |
505 | { |
506 | if (!cpu_feature_enabled(X86_FEATURE_FPU)) { |
		fpstate_init_soft(&fpstate->regs.soft);
508 | return; |
509 | } |
510 | |
	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
512 | |
513 | if (cpu_feature_enabled(X86_FEATURE_FXSR)) |
514 | fpstate_init_fxstate(fpstate); |
515 | else |
516 | fpstate_init_fstate(fpstate); |
517 | } |
518 | |
519 | static void __fpstate_reset(struct fpstate *fpstate, u64 xfd) |
520 | { |
521 | /* Initialize sizes and feature masks */ |
522 | fpstate->size = fpu_kernel_cfg.default_size; |
523 | fpstate->user_size = fpu_user_cfg.default_size; |
524 | fpstate->xfeatures = fpu_kernel_cfg.default_features; |
525 | fpstate->user_xfeatures = fpu_user_cfg.default_features; |
526 | fpstate->xfd = xfd; |
527 | } |
528 | |
529 | void fpstate_reset(struct fpu *fpu) |
530 | { |
531 | /* Set the fpstate pointer to the default fpstate */ |
532 | fpu->fpstate = &fpu->__fpstate; |
	__fpstate_reset(fpu->fpstate, init_fpstate.xfd);
534 | |
535 | /* Initialize the permission related info in fpu */ |
536 | fpu->perm.__state_perm = fpu_kernel_cfg.default_features; |
537 | fpu->perm.__state_size = fpu_kernel_cfg.default_size; |
538 | fpu->perm.__user_state_size = fpu_user_cfg.default_size; |
539 | /* Same defaults for guests */ |
540 | fpu->guest_perm = fpu->perm; |
541 | } |
542 | |
543 | static inline void fpu_inherit_perms(struct fpu *dst_fpu) |
544 | { |
545 | if (fpu_state_size_dynamic()) { |
		struct fpu *src_fpu = &current->group_leader->thread.fpu;
547 | |
		spin_lock_irq(&current->sighand->siglock);
549 | /* Fork also inherits the permissions of the parent */ |
550 | dst_fpu->perm = src_fpu->perm; |
551 | dst_fpu->guest_perm = src_fpu->guest_perm; |
		spin_unlock_irq(&current->sighand->siglock);
553 | } |
554 | } |
555 | |
556 | /* A passed ssp of zero will not cause any update */ |
557 | static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp) |
558 | { |
559 | #ifdef CONFIG_X86_USER_SHADOW_STACK |
560 | struct cet_user_state *xstate; |
561 | |
	/* No ssp update needed */
563 | if (!ssp) |
564 | return 0; |
565 | |
	xstate = get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave,
				XFEATURE_CET_USER);
568 | |
569 | /* |
570 | * If there is a non-zero ssp, then 'dst' must be configured with a shadow |
571 | * stack and the fpu state should be up to date since it was just copied |
572 | * from the parent in fpu_clone(). So there must be a valid non-init CET |
573 | * state location in the buffer. |
574 | */ |
575 | if (WARN_ON_ONCE(!xstate)) |
576 | return 1; |
577 | |
578 | xstate->user_ssp = (u64)ssp; |
579 | #endif |
580 | return 0; |
581 | } |
582 | |
583 | /* Clone current's FPU state on fork */ |
584 | int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal, |
585 | unsigned long ssp) |
586 | { |
	struct fpu *src_fpu = &current->thread.fpu;
588 | struct fpu *dst_fpu = &dst->thread.fpu; |
589 | |
590 | /* The new task's FPU state cannot be valid in the hardware. */ |
591 | dst_fpu->last_cpu = -1; |
592 | |
	fpstate_reset(dst_fpu);
594 | |
595 | if (!cpu_feature_enabled(X86_FEATURE_FPU)) |
596 | return 0; |
597 | |
598 | /* |
599 | * Enforce reload for user space tasks and prevent kernel threads |
600 | * from trying to save the FPU registers on context switch. |
601 | */ |
	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
603 | |
604 | /* |
605 | * No FPU state inheritance for kernel threads and IO |
606 | * worker threads. |
607 | */ |
608 | if (minimal) { |
609 | /* Clear out the minimal state */ |
610 | memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs, |
611 | init_fpstate_copy_size()); |
612 | return 0; |
613 | } |
614 | |
615 | /* |
616 | * If a new feature is added, ensure all dynamic features are |
617 | * caller-saved from here! |
618 | */ |
619 | BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA); |
620 | |
621 | /* |
622 | * Save the default portion of the current FPU state into the |
623 | * clone. Assume all dynamic features to be defined as caller- |
624 | * saved, which enables skipping both the expansion of fpstate |
625 | * and the copying of any dynamic state. |
626 | * |
627 | * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because |
628 | * copying is not valid when current uses non-default states. |
629 | */ |
630 | fpregs_lock(); |
631 | if (test_thread_flag(TIF_NEED_FPU_LOAD)) |
632 | fpregs_restore_userregs(); |
	save_fpregs_to_fpstate(dst_fpu);
634 | fpregs_unlock(); |
635 | if (!(clone_flags & CLONE_THREAD)) |
636 | fpu_inherit_perms(dst_fpu); |
637 | |
638 | /* |
639 | * Children never inherit PASID state. |
640 | * Force it to have its init value: |
641 | */ |
642 | if (use_xsave()) |
643 | dst_fpu->fpstate->regs.xsave.header.xfeatures &= ~XFEATURE_MASK_PASID; |
644 | |
645 | /* |
646 | * Update shadow stack pointer, in case it changed during clone. |
647 | */ |
648 | if (update_fpu_shstk(dst, ssp)) |
649 | return 1; |
650 | |
	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);
653 | |
654 | return 0; |
655 | } |
656 | |
657 | /* |
658 | * Whitelist the FPU register state embedded into task_struct for hardened |
659 | * usercopy. |
660 | */ |
661 | void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size) |
662 | { |
663 | *offset = offsetof(struct thread_struct, fpu.__fpstate.regs); |
664 | *size = fpu_kernel_cfg.default_size; |
665 | } |
666 | |
667 | /* |
668 | * Drops current FPU state: deactivates the fpregs and |
669 | * the fpstate. NOTE: it still leaves previous contents |
670 | * in the fpregs in the eager-FPU case. |
671 | * |
672 | * This function can be used in cases where we know that |
673 | * a state-restore is coming: either an explicit one, |
674 | * or a reschedule. |
675 | */ |
676 | void fpu__drop(struct fpu *fpu) |
677 | { |
678 | preempt_disable(); |
679 | |
	if (fpu == &current->thread.fpu) {
681 | /* Ignore delayed exceptions from user space */ |
682 | asm volatile("1: fwait\n" |
683 | "2:\n" |
684 | _ASM_EXTABLE(1b, 2b)); |
685 | fpregs_deactivate(fpu); |
686 | } |
687 | |
688 | trace_x86_fpu_dropped(fpu); |
689 | |
690 | preempt_enable(); |
691 | } |
692 | |
693 | /* |
694 | * Clear FPU registers by setting them up from the init fpstate. |
695 | * Caller must do fpregs_[un]lock() around it. |
696 | */ |
697 | static inline void restore_fpregs_from_init_fpstate(u64 features_mask) |
698 | { |
	if (use_xsave())
		os_xrstor(&init_fpstate, features_mask);
	else if (use_fxsr())
		fxrstor(&init_fpstate.regs.fxsave);
	else
		frstor(&init_fpstate.regs.fsave);
705 | |
706 | pkru_write_default(); |
707 | } |
708 | |
709 | /* |
710 | * Reset current->fpu memory state to the init values. |
711 | */ |
712 | static void fpu_reset_fpregs(void) |
713 | { |
	struct fpu *fpu = &current->thread.fpu;
715 | |
716 | fpregs_lock(); |
717 | __fpu_invalidate_fpregs_state(fpu); |
718 | /* |
719 | * This does not change the actual hardware registers. It just |
720 | * resets the memory image and sets TIF_NEED_FPU_LOAD so a |
721 | * subsequent return to usermode will reload the registers from the |
722 | * task's memory image. |
723 | * |
724 | * Do not use fpstate_init() here. Just copy init_fpstate which has |
725 | * the correct content already except for PKRU. |
726 | * |
727 | * PKRU handling does not rely on the xstate when restoring for |
728 | * user space as PKRU is eagerly written in switch_to() and |
729 | * flush_thread(). |
730 | */ |
731 | memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size()); |
732 | set_thread_flag(TIF_NEED_FPU_LOAD); |
733 | fpregs_unlock(); |
734 | } |
735 | |
736 | /* |
737 | * Reset current's user FPU states to the init states. current's |
738 | * supervisor states, if any, are not modified by this function. The |
739 | * caller guarantees that the XSTATE header in memory is intact. |
740 | */ |
741 | void fpu__clear_user_states(struct fpu *fpu) |
742 | { |
	WARN_ON_FPU(fpu != &current->thread.fpu);
744 | |
745 | fpregs_lock(); |
746 | if (!cpu_feature_enabled(X86_FEATURE_FPU)) { |
747 | fpu_reset_fpregs(); |
748 | fpregs_unlock(); |
749 | return; |
750 | } |
751 | |
752 | /* |
753 | * Ensure that current's supervisor states are loaded into their |
754 | * corresponding registers. |
755 | */ |
756 | if (xfeatures_mask_supervisor() && |
757 | !fpregs_state_valid(fpu, smp_processor_id())) |
		os_xrstor_supervisor(fpu->fpstate);
759 | |
760 | /* Reset user states in registers. */ |
761 | restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE); |
762 | |
763 | /* |
764 | * Now all FPU registers have their desired values. Inform the FPU |
765 | * state machine that current's FPU registers are in the hardware |
766 | * registers. The memory image does not need to be updated because |
767 | * any operation relying on it has to save the registers first when |
768 | * current's FPU is marked active. |
769 | */ |
770 | fpregs_mark_activate(); |
771 | fpregs_unlock(); |
772 | } |
773 | |
774 | void fpu_flush_thread(void) |
775 | { |
	fpstate_reset(&current->thread.fpu);
	fpu_reset_fpregs();
}

/*
780 | * Load FPU context before returning to userspace. |
781 | */ |
782 | void switch_fpu_return(void) |
783 | { |
784 | if (!static_cpu_has(X86_FEATURE_FPU)) |
785 | return; |
786 | |
787 | fpregs_restore_userregs(); |
788 | } |
789 | EXPORT_SYMBOL_GPL(switch_fpu_return); |
790 | |
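/*
 * fpregs_lock_and_load - Lock fpregs and load current's user FPU state
 *
 * Ensures that current's user FPU state is loaded into the hardware
 * registers so it can be modified safely in place. The caller pairs this
 * with fpregs_unlock() when done.
 */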
791 | void fpregs_lock_and_load(void) |
792 | { |
793 | /* |
794 | * fpregs_lock() only disables preemption (mostly). So modifying state |
795 | * in an interrupt could screw up some in progress fpregs operation. |
796 | * Warn about it. |
797 | */ |
798 | WARN_ON_ONCE(!irq_fpu_usable()); |
799 | WARN_ON_ONCE(current->flags & PF_KTHREAD); |
800 | |
801 | fpregs_lock(); |
802 | |
803 | fpregs_assert_state_consistent(); |
804 | |
805 | if (test_thread_flag(TIF_NEED_FPU_LOAD)) |
806 | fpregs_restore_userregs(); |
807 | } |
808 | |
809 | #ifdef CONFIG_X86_DEBUG_FPU |
810 | /* |
811 | * If current FPU state according to its tracking (loaded FPU context on this |
812 | * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is |
813 | * loaded on return to userland. |
814 | */ |
815 | void fpregs_assert_state_consistent(void) |
816 | { |
	struct fpu *fpu = &current->thread.fpu;
818 | |
819 | if (test_thread_flag(TIF_NEED_FPU_LOAD)) |
820 | return; |
821 | |
822 | WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id())); |
823 | } |
824 | EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent); |
825 | #endif |
826 | |
827 | void fpregs_mark_activate(void) |
828 | { |
	struct fpu *fpu = &current->thread.fpu;
830 | |
831 | fpregs_activate(fpu); |
832 | fpu->last_cpu = smp_processor_id(); |
833 | clear_thread_flag(TIF_NEED_FPU_LOAD); |
834 | } |
835 | |
836 | /* |
837 | * x87 math exception handling: |
838 | */ |
839 | |
840 | int fpu__exception_code(struct fpu *fpu, int trap_nr) |
841 | { |
842 | int err; |
843 | |
844 | if (trap_nr == X86_TRAP_MF) { |
845 | unsigned short cwd, swd; |
846 | /* |
847 | * (~cwd & swd) will mask out exceptions that are not set to unmasked |
848 | * status. 0x3f is the exception bits in these regs, 0x200 is the |
849 | * C1 reg you need in case of a stack fault, 0x040 is the stack |
850 | * fault bit. We should only be taking one exception at a time, |
851 | * so if this combination doesn't produce any single exception, |
852 | * then we have a bad program that isn't synchronizing its FPU usage |
853 | * and it will suffer the consequences since we won't be able to |
854 | * fully reproduce the context of the exception. |
855 | */ |
856 | if (boot_cpu_has(X86_FEATURE_FXSR)) { |
857 | cwd = fpu->fpstate->regs.fxsave.cwd; |
858 | swd = fpu->fpstate->regs.fxsave.swd; |
859 | } else { |
860 | cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd; |
861 | swd = (unsigned short)fpu->fpstate->regs.fsave.swd; |
862 | } |
863 | |
864 | err = swd & ~cwd; |
865 | } else { |
866 | /* |
867 | * The SIMD FPU exceptions are handled a little differently, as there |
868 | * is only a single status/control register. Thus, to determine which |
869 | * unmasked exception was caught we must mask the exception mask bits |
870 | * at 0x1f80, and then use these to mask the exception bits at 0x3f. |
871 | */ |
872 | unsigned short mxcsr = MXCSR_DEFAULT; |
873 | |
874 | if (boot_cpu_has(X86_FEATURE_XMM)) |
875 | mxcsr = fpu->fpstate->regs.fxsave.mxcsr; |
876 | |
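		/*
		 * Worked example (illustrative values): with divide-by-zero
		 * unmasked (ZM, bit 9, clear) and the ZE flag (bit 2) set,
		 * mxcsr is 0x1d84; the computation below then yields err
		 * with 0x004 set, which maps to FPE_FLTDIV.
		 */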
877 | err = ~(mxcsr >> 7) & mxcsr; |
878 | } |
879 | |
880 | if (err & 0x001) { /* Invalid op */ |
881 | /* |
882 | * swd & 0x240 == 0x040: Stack Underflow |
883 | * swd & 0x240 == 0x240: Stack Overflow |
884 | * User must clear the SF bit (0x40) if set |
885 | */ |
886 | return FPE_FLTINV; |
887 | } else if (err & 0x004) { /* Divide by Zero */ |
888 | return FPE_FLTDIV; |
889 | } else if (err & 0x008) { /* Overflow */ |
890 | return FPE_FLTOVF; |
891 | } else if (err & 0x012) { /* Denormal, Underflow */ |
892 | return FPE_FLTUND; |
893 | } else if (err & 0x020) { /* Precision */ |
894 | return FPE_FLTRES; |
895 | } |
896 | |
897 | /* |
898 | * If we're using IRQ 13, or supposedly even some trap |
899 | * X86_TRAP_MF implementations, it's possible |
900 | * we get a spurious trap, which is not an error. |
901 | */ |
902 | return 0; |
903 | } |
904 | |
905 | /* |
 * Initialize register state that may prevent the CPU from entering low-power idle.
907 | * This function will be invoked from the cpuidle driver only when needed. |
908 | */ |
909 | noinstr void fpu_idle_fpregs(void) |
910 | { |
911 | /* Note: AMX_TILE being enabled implies XGETBV1 support */ |
912 | if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) && |
913 | (xfeatures_in_use() & XFEATURE_MASK_XTILE)) { |
914 | tile_release(); |
915 | __this_cpu_write(fpu_fpregs_owner_ctx, NULL); |
916 | } |
917 | } |
918 | |