// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static u64 __ro_after_init x86_arch_cap_msr;

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user; unless
	 * forced, the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
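
/*
 * Illustrative sketch only (not the exact kernel code): the task-switch
 * path in process.c composes the per-task value from the base plus the
 * TIF-derived SSBD/STIBP bits, roughly:
 *
 *	u64 msr = x86_spec_ctrl_base;
 *	msr |= ssbd_tif_to_spec_ctrl(tifn) | stibp_tif_to_spec_ctrl(tifn);
 *	update_spec_ctrl_cond(msr);
 */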

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter;
 * defaults to false.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);

void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * A previously running kernel (kexec) may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	x86_arch_cap_msr = x86_read_arch_cap_msr();

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	/*
	 * retbleed_select_mitigation() relies on the state set by
	 * spectre_v2_select_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_select_mitigation();
	/*
	 * spectre_v2_user_select_mitigation() relies on the state set by
	 * retbleed_select_mitigation(); specifically the STIBP selection is
	 * forced for UNRET or IBPB.
	 */
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	/*
	 * srso_select_mitigation() depends on and must run after
	 * retbleed_select_mitigation().
	 */
	srso_select_mitigation();
	gds_select_mitigation();
}

/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt) "MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF] = "Vulnerable",
	[MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
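
/*
 * Example: booting with "mds=full,nosmt" selects MDS_MITIGATION_FULL and
 * additionally asks mds_select_mitigation() to disable SMT (unless the CPU
 * is only affected by MSBDS, see X86_BUG_MSBDS_ONLY above).
 */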

#undef pr_fmt
#define pr_fmt(fmt) "TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF] = "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
};

static void __init taa_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
	    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
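
/*
 * Example: "tsx_async_abort=full,nosmt" on the kernel command line selects
 * the VERW based mitigation and disables SMT in taa_select_mitigation().
 */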

#undef pr_fmt
#define pr_fmt(fmt) "MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF] = "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

	/*
	 * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
	 * mitigations; disable the KVM-only mitigation in that case.
	 */
	if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
		static_branch_disable(&mmio_stale_data_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If the Processor MMIO Stale Data bug is present and Fill Buffer data
	 * can be propagated to uncore buffers, clearing the Fill buffers on
	 * idle is required irrespective of SMT state.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
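
/*
 * Example: "mmio_stale_data=full,nosmt" selects the VERW based mitigation
 * and disables SMT; "mmio_stale_data=off" disables the mitigation entirely.
 */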

#undef pr_fmt
#define pr_fmt(fmt) "Register File Data Sampling: " fmt

enum rfds_mitigations {
	RFDS_MITIGATION_OFF,
	RFDS_MITIGATION_VERW,
	RFDS_MITIGATION_UCODE_NEEDED,
};

/* Default mitigation for Register File Data Sampling */
static enum rfds_mitigations rfds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;

static const char * const rfds_strings[] = {
	[RFDS_MITIGATION_OFF] = "Vulnerable",
	[RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
	[RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
};

static void __init rfds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
		rfds_mitigation = RFDS_MITIGATION_OFF;
		return;
	}
	if (rfds_mitigation == RFDS_MITIGATION_OFF)
		return;

	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
	else
		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
}

static __init int rfds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_RFDS))
		return 0;

	if (!strcmp(str, "off"))
		rfds_mitigation = RFDS_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		rfds_mitigation = RFDS_MITIGATION_VERW;

	return 0;
}
early_param("reg_file_data_sampling", rfds_parse_cmdline);
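
/*
 * Example: "reg_file_data_sampling=on" selects the VERW based mitigation
 * even when CONFIG_MITIGATION_RFDS=n made RFDS_MITIGATION_OFF the default.
 */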

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
		goto out;

	/*
	 * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
	 * Stale Data mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	/*
	 * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
	 * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
	 */
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
	if (rfds_mitigation == RFDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_RFDS)) {
		rfds_mitigation = RFDS_MITIGATION_VERW;
		rfds_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
	if (boot_cpu_has_bug(X86_BUG_RFDS))
		pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();
	rfds_select_mitigation();

	/*
	 * As these mitigations are inter-related and rely on the VERW
	 * instruction to clear the microarchitectural buffers, update and
	 * print their status after mitigation selection is done for each of
	 * these vulnerabilities.
	 */
	md_clear_update_mitigation();
}

#undef pr_fmt
#define pr_fmt(fmt) "SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF] = "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
	 * being disabled, and which hasn't received the SRBDS MSR microcode.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when the CPU is
	 * affected by the Processor MMIO Stale Data vulnerability.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);
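
/*
 * Example: "srbds=off" keeps RNGDS_MITG_DIS set in MSR_IA32_MCU_OPT_CTRL
 * via update_srbds_msr() above, trading RDRAND/RDSEED isolation for
 * performance.
 */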

#undef pr_fmt
#define pr_fmt(fmt) "L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
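
/*
 * Example: booting with "l1d_flush=on" only arms the static key; individual
 * tasks still have to opt in via prctl(PR_SET_SPECULATION_CTRL,
 * PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0), see
 * Documentation/admin-guide/hw-vuln/l1d_flush.rst.
 */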

#undef pr_fmt
#define pr_fmt(fmt) "GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

#if IS_ENABLED(CONFIG_MITIGATION_GDS_FORCE)
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
#else
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
#endif

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF] = "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL] = "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
};

bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);

void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
		return;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}

static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		goto out;
	}

	if (cpu_mitigations_off())
		gds_mitigation = GDS_MITIGATION_OFF;
	/* Will verify below that mitigation _can_ be disabled */

	/* No microcode */
	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation == GDS_MITIGATION_FORCE) {
			/*
			 * This only needs to be done on the boot CPU so do it
			 * here rather than in update_gds_msr().
			 */
			setup_clear_cpu_cap(X86_FEATURE_AVX);
			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
		} else {
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		}
		goto out;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}

	update_gds_msr();
out:
	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);
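
/*
 * Example: "gather_data_sampling=force" requests the AVX-disabling fallback
 * above when the GDS_CTRL microcode is absent; "gather_data_sampling=off"
 * is refused (with a warning) if the mitigation is locked by microcode.
 */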

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;

#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
	RETBLEED_CMD_IBPB,
	RETBLEED_CMD_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE] = "Vulnerable",
	[RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;

static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');

		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_cmd = RETBLEED_CMD_OFF;
		} else if (!strcmp(str, "auto")) {
			retbleed_cmd = RETBLEED_CMD_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_cmd = RETBLEED_CMD_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_cmd = RETBLEED_CMD_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_cmd = RETBLEED_CMD_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);
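
/*
 * The option list is comma-separated, so e.g. "retbleed=ibpb,nosmt" selects
 * the IBPB mitigation and additionally disables SMT on CPUs without STIBP
 * (see retbleed_select_mitigation() below).
 */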

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	bool mitigate_smt = false;

	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	switch (retbleed_cmd) {
	case RETBLEED_CMD_OFF:
		return;

	case RETBLEED_CMD_UNRET:
		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			goto do_cmd_auto;
		} else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_STUFF:
		if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) &&
		    spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		} else {
			if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))
				pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
			else
				pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");

			goto do_cmd_auto;
		}
		break;

do_cmd_auto:
	case RETBLEED_CMD_AUTO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
			if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
			else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
				 boot_cpu_has(X86_FEATURE_IBPB))
				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		}

		/*
		 * The Intel mitigation (IBRS or eIBRS) was already selected in
		 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
		 * be set accordingly below.
		 */

		break;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		x86_return_thunk = retbleed_return_thunk;

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);

		x86_return_thunk = call_depth_return_thunk;
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_MITIGATION_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
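
/*
 * Example: for arg="prctl,ibpb" with arglen=10, as returned by
 * cmdline_find_option(), match_option(arg, arglen, "prctl,ibpb") is true
 * while the shorter prefix "prctl" does not match.
 */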

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char *option;
	enum spectre_v2_user_cmd cmd;
	bool secure;
} v2_user_options[] __initconst = {
	{ "auto", SPECTRE_V2_USER_CMD_AUTO, false },
	{ "off", SPECTRE_V2_USER_CMD_NONE, false },
	{ "on", SPECTRE_V2_USER_CMD_FORCE, true },
	{ "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
	{ "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
	{ "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
	{ "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
	char arg[20];
	int ret, i;

	switch (spectre_v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}

static void __init
spectre_v2_user_select_mitigation(void)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline();
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_NONE:
			break;
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
	 * is not required.
	 *
	 * Intel's Enhanced IBRS also protects against cross-thread branch target
	 * injection in user-mode as the IBRS bit remains always set which
	 * implicitly enables cross-thread protections. However, in legacy IBRS
	 * mode, the IBRS bit is set only on kernel entry and cleared on return
	 * to userspace. AMD Automatic IBRS also does not protect userspace.
	 * These modes therefore disable the implicit cross-thread protection,
	 * so allow for STIBP to be selected in those cases.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (mode != SPECTRE_V2_USER_STRICT &&
		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
	}

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}
1413 | |
1414 | static const char * const spectre_v2_strings[] = { |
1415 | [SPECTRE_V2_NONE] = "Vulnerable" , |
1416 | [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines" , |
1417 | [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE" , |
1418 | [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS" , |
1419 | [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE" , |
1420 | [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines" , |
1421 | [SPECTRE_V2_IBRS] = "Mitigation: IBRS" , |
1422 | }; |
1423 | |
1424 | static const struct { |
1425 | const char *option; |
1426 | enum spectre_v2_mitigation_cmd cmd; |
1427 | bool secure; |
1428 | } mitigation_options[] __initconst = { |
1429 | { "off" , SPECTRE_V2_CMD_NONE, false }, |
1430 | { "on" , SPECTRE_V2_CMD_FORCE, true }, |
1431 | { "retpoline" , SPECTRE_V2_CMD_RETPOLINE, false }, |
1432 | { "retpoline,amd" , SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, |
1433 | { "retpoline,lfence" , SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, |
1434 | { "retpoline,generic" , SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, |
1435 | { "eibrs" , SPECTRE_V2_CMD_EIBRS, false }, |
1436 | { "eibrs,lfence" , SPECTRE_V2_CMD_EIBRS_LFENCE, false }, |
1437 | { "eibrs,retpoline" , SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, |
1438 | { "auto" , SPECTRE_V2_CMD_AUTO, false }, |
1439 | { "ibrs" , SPECTRE_V2_CMD_IBRS, false }, |
1440 | }; |
1441 | |
1442 | static void __init spec_v2_print_cond(const char *reason, bool secure) |
1443 | { |
1444 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) |
1445 | pr_info("%s selected on command line.\n" , reason); |
1446 | } |
1447 | |
1448 | static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) |
1449 | { |
1450 | enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; |
1451 | char arg[20]; |
1452 | int ret, i; |
1453 | |
1454 | if (cmdline_find_option_bool(cmdline_ptr: boot_command_line, option: "nospectre_v2" ) || |
1455 | cpu_mitigations_off()) |
1456 | return SPECTRE_V2_CMD_NONE; |
1457 | |
1458 | ret = cmdline_find_option(cmdline_ptr: boot_command_line, option: "spectre_v2" , buffer: arg, bufsize: sizeof(arg)); |
1459 | if (ret < 0) |
1460 | return SPECTRE_V2_CMD_AUTO; |
1461 | |
1462 | for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { |
1463 | if (!match_option(arg, arglen: ret, opt: mitigation_options[i].option)) |
1464 | continue; |
1465 | cmd = mitigation_options[i].cmd; |
1466 | break; |
1467 | } |
1468 | |
1469 | if (i >= ARRAY_SIZE(mitigation_options)) { |
1470 | pr_err("unknown option (%s). Switching to AUTO select\n" , arg); |
1471 | return SPECTRE_V2_CMD_AUTO; |
1472 | } |
1473 | |
1474 | if ((cmd == SPECTRE_V2_CMD_RETPOLINE || |
1475 | cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || |
1476 | cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || |
1477 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || |
1478 | cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && |
1479 | !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { |
1480 | pr_err("%s selected but not compiled in. Switching to AUTO select\n" , |
1481 | mitigation_options[i].option); |
1482 | return SPECTRE_V2_CMD_AUTO; |
1483 | } |
1484 | |
1485 | if ((cmd == SPECTRE_V2_CMD_EIBRS || |
1486 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || |
1487 | cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && |
1488 | !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { |
1489 | pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n" , |
1490 | mitigation_options[i].option); |
1491 | return SPECTRE_V2_CMD_AUTO; |
1492 | } |
1493 | |
1494 | if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || |
1495 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && |
1496 | !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { |
1497 | pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n" , |
1498 | mitigation_options[i].option); |
1499 | return SPECTRE_V2_CMD_AUTO; |
1500 | } |
1501 | |
1502 | if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) { |
1503 | pr_err("%s selected but not compiled in. Switching to AUTO select\n" , |
1504 | mitigation_options[i].option); |
1505 | return SPECTRE_V2_CMD_AUTO; |
1506 | } |
1507 | |
1508 | if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { |
1509 | pr_err("%s selected but not Intel CPU. Switching to AUTO select\n" , |
1510 | mitigation_options[i].option); |
1511 | return SPECTRE_V2_CMD_AUTO; |
1512 | } |
1513 | |
1514 | if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { |
1515 | pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n" , |
1516 | mitigation_options[i].option); |
1517 | return SPECTRE_V2_CMD_AUTO; |
1518 | } |
1519 | |
1520 | if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { |
1521 | pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n" , |
1522 | mitigation_options[i].option); |
1523 | return SPECTRE_V2_CMD_AUTO; |
1524 | } |
1525 | |
1526 | spec_v2_print_cond(reason: mitigation_options[i].option, |
1527 | secure: mitigation_options[i].secure); |
1528 | return cmd; |
1529 | } |
1530 | |
1531 | static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) |
1532 | { |
1533 | if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { |
1534 | pr_err("Kernel not compiled with retpoline; no mitigation available!" ); |
1535 | return SPECTRE_V2_NONE; |
1536 | } |
1537 | |
1538 | return SPECTRE_V2_RETPOLINE; |
1539 | } |
1540 | |
1541 | static bool __ro_after_init rrsba_disabled; |
1542 | |
1543 | /* Disable in-kernel use of non-RSB RET predictors */ |
1544 | static void __init spec_ctrl_disable_kernel_rrsba(void) |
1545 | { |
1546 | if (rrsba_disabled) |
1547 | return; |
1548 | |
1549 | if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) { |
1550 | rrsba_disabled = true; |
1551 | return; |
1552 | } |
1553 | |
1554 | if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) |
1555 | return; |
1556 | |
1557 | x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; |
1558 | update_spec_ctrl(val: x86_spec_ctrl_base); |
1559 | rrsba_disabled = true; |
1560 | } |
1561 | |
1562 | static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) |
1563 | { |
1564 | /* |
1565 | * Similar to context switches, there are two types of RSB attacks |
1566 | * after VM exit: |
1567 | * |
1568 | * 1) RSB underflow |
1569 | * |
1570 | * 2) Poisoned RSB entry |
1571 | * |
1572 | * When retpoline is enabled, both are mitigated by filling/clearing |
1573 | * the RSB. |
1574 | * |
1575 | * When IBRS is enabled, while #1 would be mitigated by the IBRS branch |
1576 | * prediction isolation protections, RSB still needs to be cleared |
1577 | * because of #2. Note that SMEP provides no protection here, unlike |
1578 | * user-space-poisoned RSB entries. |
1579 | * |
	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
	 * bug is present then a LITE version of RSB protection is required:
	 * just a single CALL needs to retire before a RET is executed.
1583 | */ |
1584 | switch (mode) { |
1585 | case SPECTRE_V2_NONE: |
1586 | return; |
1587 | |
1588 | case SPECTRE_V2_EIBRS_LFENCE: |
1589 | case SPECTRE_V2_EIBRS: |
1590 | if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { |
1591 | setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); |
1592 | pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n" ); |
1593 | } |
1594 | return; |
1595 | |
1596 | case SPECTRE_V2_EIBRS_RETPOLINE: |
1597 | case SPECTRE_V2_RETPOLINE: |
1598 | case SPECTRE_V2_LFENCE: |
1599 | case SPECTRE_V2_IBRS: |
1600 | setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); |
1601 | pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n" ); |
1602 | return; |
1603 | } |
1604 | |
1605 | pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit" ); |
1606 | dump_stack(); |
1607 | } |
1608 | |
1609 | /* |
 * Set BHI_DIS_S to prevent indirect branches in the kernel from being
 * influenced by branch history in userspace. Not needed if BHI_NO is set.
1612 | */ |
1613 | static bool __init spec_ctrl_bhi_dis(void) |
1614 | { |
1615 | if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) |
1616 | return false; |
1617 | |
1618 | x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S; |
	update_spec_ctrl(x86_spec_ctrl_base);
1620 | setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW); |
1621 | |
1622 | return true; |
1623 | } |
1624 | |
1625 | enum bhi_mitigations { |
1626 | BHI_MITIGATION_OFF, |
1627 | BHI_MITIGATION_ON, |
1628 | }; |
1629 | |
1630 | static enum bhi_mitigations bhi_mitigation __ro_after_init = |
1631 | IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF; |
1632 | |
1633 | static int __init spectre_bhi_parse_cmdline(char *str) |
1634 | { |
1635 | if (!str) |
1636 | return -EINVAL; |
1637 | |
1638 | if (!strcmp(str, "off" )) |
1639 | bhi_mitigation = BHI_MITIGATION_OFF; |
1640 | else if (!strcmp(str, "on" )) |
1641 | bhi_mitigation = BHI_MITIGATION_ON; |
1642 | else |
1643 | pr_err("Ignoring unknown spectre_bhi option (%s)" , str); |
1644 | |
1645 | return 0; |
1646 | } |
1647 | early_param("spectre_bhi" , spectre_bhi_parse_cmdline); |
1648 | |
1649 | static void __init bhi_select_mitigation(void) |
1650 | { |
1651 | if (bhi_mitigation == BHI_MITIGATION_OFF) |
1652 | return; |
1653 | |
1654 | /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ |
1655 | if (boot_cpu_has(X86_FEATURE_RETPOLINE) && |
1656 | !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { |
1657 | spec_ctrl_disable_kernel_rrsba(); |
1658 | if (rrsba_disabled) |
1659 | return; |
1660 | } |
1661 | |
1662 | if (spec_ctrl_bhi_dis()) |
1663 | return; |
1664 | |
1665 | if (!IS_ENABLED(CONFIG_X86_64)) |
1666 | return; |
1667 | |
	/* Mitigate KVM by default */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
	pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");

	/* Also mitigate syscalls since the mitigation is on */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
	pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
1675 | } |
1676 | |
1677 | static void __init spectre_v2_select_mitigation(void) |
1678 | { |
1679 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); |
1680 | enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; |
1681 | |
1682 | /* |
1683 | * If the CPU is not affected and the command line mode is NONE or AUTO |
1684 | * then nothing to do. |
1685 | */ |
1686 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && |
1687 | (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) |
1688 | return; |
1689 | |
1690 | switch (cmd) { |
1691 | case SPECTRE_V2_CMD_NONE: |
1692 | return; |
1693 | |
1694 | case SPECTRE_V2_CMD_FORCE: |
1695 | case SPECTRE_V2_CMD_AUTO: |
1696 | if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { |
1697 | mode = SPECTRE_V2_EIBRS; |
1698 | break; |
1699 | } |
1700 | |
1701 | if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && |
1702 | boot_cpu_has_bug(X86_BUG_RETBLEED) && |
1703 | retbleed_cmd != RETBLEED_CMD_OFF && |
1704 | retbleed_cmd != RETBLEED_CMD_STUFF && |
1705 | boot_cpu_has(X86_FEATURE_IBRS) && |
1706 | boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { |
1707 | mode = SPECTRE_V2_IBRS; |
1708 | break; |
1709 | } |
1710 | |
1711 | mode = spectre_v2_select_retpoline(); |
1712 | break; |
1713 | |
1714 | case SPECTRE_V2_CMD_RETPOLINE_LFENCE: |
1715 | pr_err(SPECTRE_V2_LFENCE_MSG); |
1716 | mode = SPECTRE_V2_LFENCE; |
1717 | break; |
1718 | |
1719 | case SPECTRE_V2_CMD_RETPOLINE_GENERIC: |
1720 | mode = SPECTRE_V2_RETPOLINE; |
1721 | break; |
1722 | |
1723 | case SPECTRE_V2_CMD_RETPOLINE: |
1724 | mode = spectre_v2_select_retpoline(); |
1725 | break; |
1726 | |
1727 | case SPECTRE_V2_CMD_IBRS: |
1728 | mode = SPECTRE_V2_IBRS; |
1729 | break; |
1730 | |
1731 | case SPECTRE_V2_CMD_EIBRS: |
1732 | mode = SPECTRE_V2_EIBRS; |
1733 | break; |
1734 | |
1735 | case SPECTRE_V2_CMD_EIBRS_LFENCE: |
1736 | mode = SPECTRE_V2_EIBRS_LFENCE; |
1737 | break; |
1738 | |
1739 | case SPECTRE_V2_CMD_EIBRS_RETPOLINE: |
1740 | mode = SPECTRE_V2_EIBRS_RETPOLINE; |
1741 | break; |
1742 | } |
1743 | |
1744 | if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) |
1745 | pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); |
1746 | |
1747 | if (spectre_v2_in_ibrs_mode(mode)) { |
1748 | if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { |
1749 | msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); |
1750 | } else { |
1751 | x86_spec_ctrl_base |= SPEC_CTRL_IBRS; |
			update_spec_ctrl(x86_spec_ctrl_base);
1753 | } |
1754 | } |
1755 | |
1756 | switch (mode) { |
1757 | case SPECTRE_V2_NONE: |
1758 | case SPECTRE_V2_EIBRS: |
1759 | break; |
1760 | |
1761 | case SPECTRE_V2_IBRS: |
1762 | setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); |
1763 | if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) |
1764 | pr_warn(SPECTRE_V2_IBRS_PERF_MSG); |
1765 | break; |
1766 | |
1767 | case SPECTRE_V2_LFENCE: |
1768 | case SPECTRE_V2_EIBRS_LFENCE: |
1769 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); |
1770 | fallthrough; |
1771 | |
1772 | case SPECTRE_V2_RETPOLINE: |
1773 | case SPECTRE_V2_EIBRS_RETPOLINE: |
1774 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); |
1775 | break; |
1776 | } |
1777 | |
1778 | /* |
1779 | * Disable alternate RSB predictions in kernel when indirect CALLs and |
	 * JMPs get protection against BHI and Intramode-BTI, but RET
1781 | * prediction from a non-RSB predictor is still a risk. |
1782 | */ |
1783 | if (mode == SPECTRE_V2_EIBRS_LFENCE || |
1784 | mode == SPECTRE_V2_EIBRS_RETPOLINE || |
1785 | mode == SPECTRE_V2_RETPOLINE) |
1786 | spec_ctrl_disable_kernel_rrsba(); |
1787 | |
	if (boot_cpu_has_bug(X86_BUG_BHI))
1789 | bhi_select_mitigation(); |
1790 | |
1791 | spectre_v2_enabled = mode; |
1792 | pr_info("%s\n" , spectre_v2_strings[mode]); |
1793 | |
1794 | /* |
1795 | * If Spectre v2 protection has been enabled, fill the RSB during a |
1796 | * context switch. In general there are two types of RSB attacks |
1797 | * across context switches, for which the CALLs/RETs may be unbalanced. |
1798 | * |
1799 | * 1) RSB underflow |
1800 | * |
1801 | * Some Intel parts have "bottomless RSB". When the RSB is empty, |
1802 | * speculated return targets may come from the branch predictor, |
1803 | * which could have a user-poisoned BTB or BHB entry. |
1804 | * |
1805 | * AMD has it even worse: *all* returns are speculated from the BTB, |
1806 | * regardless of the state of the RSB. |
1807 | * |
1808 | * When IBRS or eIBRS is enabled, the "user -> kernel" attack |
1809 | * scenario is mitigated by the IBRS branch prediction isolation |
1810 | * properties, so the RSB buffer filling wouldn't be necessary to |
1811 | * protect against this type of attack. |
1812 | * |
1813 | * The "user -> user" attack scenario is mitigated by RSB filling. |
1814 | * |
1815 | * 2) Poisoned RSB entry |
1816 | * |
1817 | * If the 'next' in-kernel return stack is shorter than 'prev', |
1818 | * 'next' could be tricked into speculating with a user-poisoned RSB |
1819 | * entry. |
1820 | * |
1821 | * The "user -> kernel" attack scenario is mitigated by SMEP and |
1822 | * eIBRS. |
1823 | * |
1824 | * The "user -> user" scenario, also known as SpectreBHB, requires |
1825 | * RSB clearing. |
1826 | * |
1827 | * So to mitigate all cases, unconditionally fill RSB on context |
1828 | * switches. |
1829 | * |
1830 | * FIXME: Is this pointless for retbleed-affected AMD? |
1831 | */ |
1832 | setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); |
1833 | pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n" ); |
1834 | |
1835 | spectre_v2_determine_rsb_fill_type_at_vmexit(mode); |
1836 | |
1837 | /* |
1838 | * Retpoline protects the kernel, but doesn't protect firmware. IBRS |
1839 | * and Enhanced IBRS protect firmware too, so enable IBRS around |
1840 | * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't |
1841 | * otherwise enabled. |
1842 | * |
1843 | * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because |
1844 | * the user might select retpoline on the kernel command line and if |
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
1846 | * enable IBRS around firmware calls. |
1847 | */ |
1848 | if (boot_cpu_has_bug(X86_BUG_RETBLEED) && |
1849 | boot_cpu_has(X86_FEATURE_IBPB) && |
1850 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || |
1851 | boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { |
1852 | |
1853 | if (retbleed_cmd != RETBLEED_CMD_IBPB) { |
1854 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); |
1855 | pr_info("Enabling Speculation Barrier for firmware calls\n" ); |
1856 | } |
1857 | |
1858 | } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { |
1859 | setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); |
1860 | pr_info("Enabling Restricted Speculation for firmware calls\n" ); |
1861 | } |
1862 | |
1863 | /* Set up IBPB and STIBP depending on the general spectre V2 command */ |
1864 | spectre_v2_cmd = cmd; |
1865 | } |
1866 | |
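/* Propagate the STIBP bit from x86_spec_ctrl_base into SPEC_CTRL on this CPU. */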
1867 | static void update_stibp_msr(void * __unused) |
1868 | { |
1869 | u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); |
1870 | update_spec_ctrl(val); |
1871 | } |
1872 | |
1873 | /* Update x86_spec_ctrl_base in case SMT state changed. */ |
1874 | static void update_stibp_strict(void) |
1875 | { |
1876 | u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; |
1877 | |
1878 | if (sched_smt_active()) |
1879 | mask |= SPEC_CTRL_STIBP; |
1880 | |
1881 | if (mask == x86_spec_ctrl_base) |
1882 | return; |
1883 | |
1884 | pr_info("Update user space SMT mitigation: STIBP %s\n" , |
1885 | mask & SPEC_CTRL_STIBP ? "always-on" : "off" ); |
1886 | x86_spec_ctrl_base = mask; |
1887 | on_each_cpu(func: update_stibp_msr, NULL, wait: 1); |
1888 | } |
1889 | |
1890 | /* Update the static key controlling the evaluation of TIF_SPEC_IB */ |
1891 | static void update_indir_branch_cond(void) |
1892 | { |
1893 | if (sched_smt_active()) |
1894 | static_branch_enable(&switch_to_cond_stibp); |
1895 | else |
1896 | static_branch_disable(&switch_to_cond_stibp); |
1897 | } |
1898 | |
1899 | #undef pr_fmt |
1900 | #define pr_fmt(fmt) fmt |
1901 | |
1902 | /* Update the static key controlling the MDS CPU buffer clear in idle */ |
1903 | static void update_mds_branch_idle(void) |
1904 | { |
1905 | /* |
1906 | * Enable the idle clearing if SMT is active on CPUs which are |
1907 | * affected only by MSBDS and not any other MDS variant. |
1908 | * |
1909 | * The other variants cannot be mitigated when SMT is enabled, so |
1910 | * clearing the buffers on idle just to prevent the Store Buffer |
1911 | * repartitioning leak would be a window dressing exercise. |
1912 | */ |
1913 | if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) |
1914 | return; |
1915 | |
1916 | if (sched_smt_active()) { |
1917 | static_branch_enable(&mds_idle_clear); |
1918 | } else if (mmio_mitigation == MMIO_MITIGATION_OFF || |
1919 | (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { |
1920 | static_branch_disable(&mds_idle_clear); |
1921 | } |
1922 | } |
1923 | |
1924 | #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" |
1925 | #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" |
1926 | #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" |
1927 | |
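/* Re-evaluate the SMT-dependent mitigations whenever the SMT state changes. */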
1928 | void cpu_bugs_smt_update(void) |
1929 | { |
1930 | mutex_lock(&spec_ctrl_mutex); |
1931 | |
1932 | if (sched_smt_active() && unprivileged_ebpf_enabled() && |
1933 | spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) |
1934 | pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); |
1935 | |
1936 | switch (spectre_v2_user_stibp) { |
1937 | case SPECTRE_V2_USER_NONE: |
1938 | break; |
1939 | case SPECTRE_V2_USER_STRICT: |
1940 | case SPECTRE_V2_USER_STRICT_PREFERRED: |
1941 | update_stibp_strict(); |
1942 | break; |
1943 | case SPECTRE_V2_USER_PRCTL: |
1944 | case SPECTRE_V2_USER_SECCOMP: |
1945 | update_indir_branch_cond(); |
1946 | break; |
1947 | } |
1948 | |
1949 | switch (mds_mitigation) { |
1950 | case MDS_MITIGATION_FULL: |
1951 | case MDS_MITIGATION_VMWERV: |
1952 | if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) |
1953 | pr_warn_once(MDS_MSG_SMT); |
1954 | update_mds_branch_idle(); |
1955 | break; |
1956 | case MDS_MITIGATION_OFF: |
1957 | break; |
1958 | } |
1959 | |
1960 | switch (taa_mitigation) { |
1961 | case TAA_MITIGATION_VERW: |
1962 | case TAA_MITIGATION_UCODE_NEEDED: |
1963 | if (sched_smt_active()) |
1964 | pr_warn_once(TAA_MSG_SMT); |
1965 | break; |
1966 | case TAA_MITIGATION_TSX_DISABLED: |
1967 | case TAA_MITIGATION_OFF: |
1968 | break; |
1969 | } |
1970 | |
1971 | switch (mmio_mitigation) { |
1972 | case MMIO_MITIGATION_VERW: |
1973 | case MMIO_MITIGATION_UCODE_NEEDED: |
1974 | if (sched_smt_active()) |
1975 | pr_warn_once(MMIO_MSG_SMT); |
1976 | break; |
1977 | case MMIO_MITIGATION_OFF: |
1978 | break; |
1979 | } |
1980 | |
	mutex_unlock(&spec_ctrl_mutex);
1982 | } |
1983 | |
1984 | #undef pr_fmt |
1985 | #define pr_fmt(fmt) "Speculative Store Bypass: " fmt |
1986 | |
1987 | static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; |
1988 | |
1989 | /* The kernel command line selection */ |
1990 | enum ssb_mitigation_cmd { |
1991 | SPEC_STORE_BYPASS_CMD_NONE, |
1992 | SPEC_STORE_BYPASS_CMD_AUTO, |
1993 | SPEC_STORE_BYPASS_CMD_ON, |
1994 | SPEC_STORE_BYPASS_CMD_PRCTL, |
1995 | SPEC_STORE_BYPASS_CMD_SECCOMP, |
1996 | }; |
1997 | |
1998 | static const char * const ssb_strings[] = { |
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2003 | }; |
2004 | |
2005 | static const struct { |
2006 | const char *option; |
2007 | enum ssb_mitigation_cmd cmd; |
2008 | } ssb_mitigation_options[] __initconst = { |
2009 | { "auto" , SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ |
2010 | { "on" , SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ |
2011 | { "off" , SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ |
2012 | { "prctl" , SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ |
2013 | { "seccomp" , SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ |
2014 | }; |
2015 | |
2016 | static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) |
2017 | { |
2018 | enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; |
2019 | char arg[20]; |
2020 | int ret, i; |
2021 | |
	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
2028 | if (ret < 0) |
2029 | return SPEC_STORE_BYPASS_CMD_AUTO; |
2030 | |
2031 | for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { |
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
2033 | continue; |
2034 | |
2035 | cmd = ssb_mitigation_options[i].cmd; |
2036 | break; |
2037 | } |
2038 | |
2039 | if (i >= ARRAY_SIZE(ssb_mitigation_options)) { |
2040 | pr_err("unknown option (%s). Switching to AUTO select\n" , arg); |
2041 | return SPEC_STORE_BYPASS_CMD_AUTO; |
2042 | } |
2043 | } |
2044 | |
2045 | return cmd; |
2046 | } |
2047 | |
2048 | static enum ssb_mitigation __init __ssb_select_mitigation(void) |
2049 | { |
2050 | enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; |
2051 | enum ssb_mitigation_cmd cmd; |
2052 | |
2053 | if (!boot_cpu_has(X86_FEATURE_SSBD)) |
2054 | return mode; |
2055 | |
2056 | cmd = ssb_parse_cmdline(); |
2057 | if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && |
2058 | (cmd == SPEC_STORE_BYPASS_CMD_NONE || |
2059 | cmd == SPEC_STORE_BYPASS_CMD_AUTO)) |
2060 | return mode; |
2061 | |
2062 | switch (cmd) { |
2063 | case SPEC_STORE_BYPASS_CMD_SECCOMP: |
2064 | /* |
2065 | * Choose prctl+seccomp as the default mode if seccomp is |
2066 | * enabled. |
2067 | */ |
2068 | if (IS_ENABLED(CONFIG_SECCOMP)) |
2069 | mode = SPEC_STORE_BYPASS_SECCOMP; |
2070 | else |
2071 | mode = SPEC_STORE_BYPASS_PRCTL; |
2072 | break; |
2073 | case SPEC_STORE_BYPASS_CMD_ON: |
2074 | mode = SPEC_STORE_BYPASS_DISABLE; |
2075 | break; |
2076 | case SPEC_STORE_BYPASS_CMD_AUTO: |
2077 | case SPEC_STORE_BYPASS_CMD_PRCTL: |
2078 | mode = SPEC_STORE_BYPASS_PRCTL; |
2079 | break; |
2080 | case SPEC_STORE_BYPASS_CMD_NONE: |
2081 | break; |
2082 | } |
2083 | |
2084 | /* |
2085 | * We have three CPU feature flags that are in play here: |
2086 | * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. |
2087 | * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass |
2088 | * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation |
2089 | */ |
2090 | if (mode == SPEC_STORE_BYPASS_DISABLE) { |
2091 | setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); |
2092 | /* |
2093 | * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may |
2094 | * use a completely different MSR and bit dependent on family. |
2095 | */ |
2096 | if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && |
2097 | !static_cpu_has(X86_FEATURE_AMD_SSBD)) { |
2098 | x86_amd_ssb_disable(); |
2099 | } else { |
2100 | x86_spec_ctrl_base |= SPEC_CTRL_SSBD; |
			update_spec_ctrl(x86_spec_ctrl_base);
2102 | } |
2103 | } |
2104 | |
2105 | return mode; |
2106 | } |
2107 | |
2108 | static void ssb_select_mitigation(void) |
2109 | { |
2110 | ssb_mode = __ssb_select_mitigation(); |
2111 | |
2112 | if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) |
2113 | pr_info("%s\n" , ssb_strings[ssb_mode]); |
2114 | } |
2115 | |
2116 | #undef pr_fmt |
2117 | #define pr_fmt(fmt) "Speculation prctl: " fmt |
2118 | |
2119 | static void task_update_spec_tif(struct task_struct *tsk) |
2120 | { |
2121 | /* Force the update of the real TIF bits */ |
2122 | set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); |
2123 | |
2124 | /* |
2125 | * Immediately update the speculation control MSRs for the current |
2126 | * task, but for a non-current task delay setting the CPU |
2127 | * mitigation until it is scheduled next. |
2128 | * |
2129 | * This can only happen for SECCOMP mitigation. For PRCTL it's |
2130 | * always the current task. |
2131 | */ |
2132 | if (tsk == current) |
2133 | speculation_ctrl_update_current(); |
2134 | } |
2135 | |
2136 | static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) |
2137 | { |
2138 | |
2139 | if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) |
2140 | return -EPERM; |
2141 | |
2142 | switch (ctrl) { |
2143 | case PR_SPEC_ENABLE: |
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2145 | return 0; |
2146 | case PR_SPEC_DISABLE: |
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2148 | return 0; |
2149 | default: |
2150 | return -ERANGE; |
2151 | } |
2152 | } |
2153 | |
2154 | static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
2155 | { |
2156 | if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && |
2157 | ssb_mode != SPEC_STORE_BYPASS_SECCOMP) |
2158 | return -ENXIO; |
2159 | |
2160 | switch (ctrl) { |
2161 | case PR_SPEC_ENABLE: |
2162 | /* If speculation is force disabled, enable is not allowed */ |
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
2186 | break; |
2187 | default: |
2188 | return -ERANGE; |
2189 | } |
2190 | return 0; |
2191 | } |
2192 | |
2193 | static bool is_spec_ib_user_controlled(void) |
2194 | { |
2195 | return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || |
2196 | spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || |
2197 | spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || |
2198 | spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; |
2199 | } |
2200 | |
2201 | static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) |
2202 | { |
2203 | switch (ctrl) { |
2204 | case PR_SPEC_ENABLE: |
2205 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
2206 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
2207 | return 0; |
2208 | |
2209 | /* |
2210 | * With strict mode for both IBPB and STIBP, the instruction |
2211 | * code paths avoid checking this task flag and instead, |
2212 | * unconditionally run the instruction. However, STIBP and IBPB |
2213 | * are independent and either can be set to conditionally |
2214 | * enabled regardless of the mode of the other. |
2215 | * |
2216 | * If either is set to conditional, allow the task flag to be |
2217 | * updated, unless it was force-disabled by a previous prctl |
2218 | * call. Currently, this is possible on an AMD CPU which has the |
2219 | * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the |
2220 | * kernel is booted with 'spectre_v2_user=seccomp', then |
2221 | * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and |
2222 | * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. |
2223 | */ |
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
2230 | break; |
2231 | case PR_SPEC_DISABLE: |
2232 | case PR_SPEC_FORCE_DISABLE: |
2233 | /* |
2234 | * Indirect branch speculation is always allowed when |
2235 | * mitigation is force disabled. |
2236 | */ |
2237 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
2238 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
2239 | return -EPERM; |
2240 | |
2241 | if (!is_spec_ib_user_controlled()) |
2242 | return 0; |
2243 | |
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
2248 | if (task == current) |
2249 | indirect_branch_prediction_barrier(); |
2250 | break; |
2251 | default: |
2252 | return -ERANGE; |
2253 | } |
2254 | return 0; |
2255 | } |
2256 | |
2257 | int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
2258 | unsigned long ctrl) |
2259 | { |
2260 | switch (which) { |
2261 | case PR_SPEC_STORE_BYPASS: |
2262 | return ssb_prctl_set(task, ctrl); |
2263 | case PR_SPEC_INDIRECT_BRANCH: |
2264 | return ib_prctl_set(task, ctrl); |
2265 | case PR_SPEC_L1D_FLUSH: |
2266 | return l1d_flush_prctl_set(task, ctrl); |
2267 | default: |
2268 | return -ENODEV; |
2269 | } |
2270 | } |
2271 | |
2272 | #ifdef CONFIG_SECCOMP |
2273 | void arch_seccomp_spec_mitigate(struct task_struct *task) |
2274 | { |
2275 | if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) |
2276 | ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
2277 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || |
2278 | spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) |
2279 | ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
2280 | } |
2281 | #endif |
2282 | |
2283 | static int l1d_flush_prctl_get(struct task_struct *task) |
2284 | { |
2285 | if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) |
2286 | return PR_SPEC_FORCE_DISABLE; |
2287 | |
	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2289 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
2290 | else |
2291 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
2292 | } |
2293 | |
2294 | static int ssb_prctl_get(struct task_struct *task) |
2295 | { |
2296 | switch (ssb_mode) { |
2297 | case SPEC_STORE_BYPASS_NONE: |
2298 | if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) |
2299 | return PR_SPEC_ENABLE; |
2300 | return PR_SPEC_NOT_AFFECTED; |
2301 | case SPEC_STORE_BYPASS_DISABLE: |
2302 | return PR_SPEC_DISABLE; |
2303 | case SPEC_STORE_BYPASS_SECCOMP: |
2304 | case SPEC_STORE_BYPASS_PRCTL: |
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
2310 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
2311 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
2312 | } |
2313 | BUG(); |
2314 | } |
2315 | |
2316 | static int ib_prctl_get(struct task_struct *task) |
2317 | { |
2318 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) |
2319 | return PR_SPEC_NOT_AFFECTED; |
2320 | |
2321 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
2322 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
2323 | return PR_SPEC_ENABLE; |
2324 | else if (is_spec_ib_user_controlled()) { |
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
2328 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
2329 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
2330 | } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || |
2331 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || |
2332 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) |
2333 | return PR_SPEC_DISABLE; |
2334 | else |
2335 | return PR_SPEC_NOT_AFFECTED; |
2336 | } |
2337 | |
2338 | int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
2339 | { |
2340 | switch (which) { |
2341 | case PR_SPEC_STORE_BYPASS: |
2342 | return ssb_prctl_get(task); |
2343 | case PR_SPEC_INDIRECT_BRANCH: |
2344 | return ib_prctl_get(task); |
2345 | case PR_SPEC_L1D_FLUSH: |
2346 | return l1d_flush_prctl_get(task); |
2347 | default: |
2348 | return -ENODEV; |
2349 | } |
2350 | } |
2351 | |
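/* Replicate the boot CPU's speculation control setup on a starting AP. */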
2352 | void x86_spec_ctrl_setup_ap(void) |
2353 | { |
2354 | if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
		update_spec_ctrl(x86_spec_ctrl_base);
2356 | |
2357 | if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) |
2358 | x86_amd_ssb_disable(); |
2359 | } |
2360 | |
2361 | bool itlb_multihit_kvm_mitigation; |
2362 | EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); |
2363 | |
2364 | #undef pr_fmt |
2365 | #define pr_fmt(fmt) "L1TF: " fmt |
2366 | |
2367 | /* Default mitigation for L1TF-affected CPUs */ |
2368 | enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; |
2369 | #if IS_ENABLED(CONFIG_KVM_INTEL) |
2370 | EXPORT_SYMBOL_GPL(l1tf_mitigation); |
2371 | #endif |
2372 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
2373 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); |
2374 | |
2375 | /* |
2376 | * These CPUs all support 44bits physical address space internally in the |
2377 | * cache but CPUID can report a smaller number of physical address bits. |
2378 | * |
2379 | * The L1TF mitigation uses the top most address bit for the inversion of |
2380 | * non present PTEs. When the installed memory reaches into the top most |
2381 | * address bit due to memory holes, which has been observed on machines |
2382 | * which report 36bits physical address bits and have 32G RAM installed, |
2383 | * then the mitigation range check in l1tf_select_mitigation() triggers. |
2384 | * This is a false positive because the mitigation is still possible due to |
2385 | * the fact that the cache uses 44bit internally. Use the cache bits |
2386 | * instead of the reported physical bits and adjust them on the affected |
2387 | * machines to 44bit if the reported bits are less than 44. |
2388 | */ |
2389 | static void override_cache_bits(struct cpuinfo_x86 *c) |
2390 | { |
2391 | if (c->x86 != 6) |
2392 | return; |
2393 | |
2394 | switch (c->x86_model) { |
2395 | case INTEL_FAM6_NEHALEM: |
2396 | case INTEL_FAM6_WESTMERE: |
2397 | case INTEL_FAM6_SANDYBRIDGE: |
2398 | case INTEL_FAM6_IVYBRIDGE: |
2399 | case INTEL_FAM6_HASWELL: |
2400 | case INTEL_FAM6_HASWELL_L: |
2401 | case INTEL_FAM6_HASWELL_G: |
2402 | case INTEL_FAM6_BROADWELL: |
2403 | case INTEL_FAM6_BROADWELL_G: |
2404 | case INTEL_FAM6_SKYLAKE_L: |
2405 | case INTEL_FAM6_SKYLAKE: |
2406 | case INTEL_FAM6_KABYLAKE_L: |
2407 | case INTEL_FAM6_KABYLAKE: |
2408 | if (c->x86_cache_bits < 44) |
2409 | c->x86_cache_bits = 44; |
2410 | break; |
2411 | } |
2412 | } |
2413 | |
2414 | static void __init l1tf_select_mitigation(void) |
2415 | { |
2416 | u64 half_pa; |
2417 | |
2418 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
2419 | return; |
2420 | |
2421 | if (cpu_mitigations_off()) |
2422 | l1tf_mitigation = L1TF_MITIGATION_OFF; |
2423 | else if (cpu_mitigations_auto_nosmt()) |
2424 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; |
2425 | |
	override_cache_bits(&boot_cpu_data);
2427 | |
2428 | switch (l1tf_mitigation) { |
2429 | case L1TF_MITIGATION_OFF: |
2430 | case L1TF_MITIGATION_FLUSH_NOWARN: |
2431 | case L1TF_MITIGATION_FLUSH: |
2432 | break; |
2433 | case L1TF_MITIGATION_FLUSH_NOSMT: |
2434 | case L1TF_MITIGATION_FULL: |
		cpu_smt_disable(false);
2436 | break; |
2437 | case L1TF_MITIGATION_FULL_FORCE: |
		cpu_smt_disable(true);
2439 | break; |
2440 | } |
2441 | |
2442 | #if CONFIG_PGTABLE_LEVELS == 2 |
2443 | pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n" ); |
2444 | return; |
2445 | #endif |
2446 | |
2447 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; |
2448 | if (l1tf_mitigation != L1TF_MITIGATION_OFF && |
	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
			half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2455 | return; |
2456 | } |
2457 | |
2458 | setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); |
2459 | } |
2460 | |
2461 | static int __init l1tf_cmdline(char *str) |
2462 | { |
2463 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
2464 | return 0; |
2465 | |
2466 | if (!str) |
2467 | return -EINVAL; |
2468 | |
2469 | if (!strcmp(str, "off" )) |
2470 | l1tf_mitigation = L1TF_MITIGATION_OFF; |
2471 | else if (!strcmp(str, "flush,nowarn" )) |
2472 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; |
2473 | else if (!strcmp(str, "flush" )) |
2474 | l1tf_mitigation = L1TF_MITIGATION_FLUSH; |
2475 | else if (!strcmp(str, "flush,nosmt" )) |
2476 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; |
2477 | else if (!strcmp(str, "full" )) |
2478 | l1tf_mitigation = L1TF_MITIGATION_FULL; |
2479 | else if (!strcmp(str, "full,force" )) |
2480 | l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; |
2481 | |
2482 | return 0; |
2483 | } |
2484 | early_param("l1tf" , l1tf_cmdline); |
2485 | |
2486 | #undef pr_fmt |
2487 | #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt |
2488 | |
2489 | enum srso_mitigation { |
2490 | SRSO_MITIGATION_NONE, |
2491 | SRSO_MITIGATION_UCODE_NEEDED, |
2492 | SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED, |
2493 | SRSO_MITIGATION_MICROCODE, |
2494 | SRSO_MITIGATION_SAFE_RET, |
2495 | SRSO_MITIGATION_IBPB, |
2496 | SRSO_MITIGATION_IBPB_ON_VMEXIT, |
2497 | }; |
2498 | |
2499 | enum srso_mitigation_cmd { |
2500 | SRSO_CMD_OFF, |
2501 | SRSO_CMD_MICROCODE, |
2502 | SRSO_CMD_SAFE_RET, |
2503 | SRSO_CMD_IBPB, |
2504 | SRSO_CMD_IBPB_ON_VMEXIT, |
2505 | }; |
2506 | |
2507 | static const char * const srso_strings[] = { |
	[SRSO_MITIGATION_NONE]			= "Vulnerable",
	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
	[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED]	= "Vulnerable: Safe RET, no microcode",
	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
2515 | }; |
2516 | |
2517 | static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; |
2518 | static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET; |
2519 | |
2520 | static int __init srso_parse_cmdline(char *str) |
2521 | { |
2522 | if (!str) |
2523 | return -EINVAL; |
2524 | |
2525 | if (!strcmp(str, "off" )) |
2526 | srso_cmd = SRSO_CMD_OFF; |
2527 | else if (!strcmp(str, "microcode" )) |
2528 | srso_cmd = SRSO_CMD_MICROCODE; |
2529 | else if (!strcmp(str, "safe-ret" )) |
2530 | srso_cmd = SRSO_CMD_SAFE_RET; |
2531 | else if (!strcmp(str, "ibpb" )) |
2532 | srso_cmd = SRSO_CMD_IBPB; |
2533 | else if (!strcmp(str, "ibpb-vmexit" )) |
2534 | srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT; |
2535 | else |
2536 | pr_err("Ignoring unknown SRSO option (%s)." , str); |
2537 | |
2538 | return 0; |
2539 | } |
2540 | early_param("spec_rstack_overflow" , srso_parse_cmdline); |
2541 | |
2542 | #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." |
2543 | |
2544 | static void __init srso_select_mitigation(void) |
2545 | { |
2546 | bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE); |
2547 | |
2548 | if (cpu_mitigations_off()) |
2549 | return; |
2550 | |
2551 | if (!boot_cpu_has_bug(X86_BUG_SRSO)) { |
2552 | if (boot_cpu_has(X86_FEATURE_SBPB)) |
2553 | x86_pred_cmd = PRED_CMD_SBPB; |
2554 | return; |
2555 | } |
2556 | |
2557 | if (has_microcode) { |
2558 | /* |
2559 | * Zen1/2 with SMT off aren't vulnerable after the right |
2560 | * IBPB microcode has been applied. |
2561 | * |
2562 | * Zen1/2 don't have SBPB, no need to try to enable it here. |
2563 | */ |
2564 | if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { |
2565 | setup_force_cpu_cap(X86_FEATURE_SRSO_NO); |
2566 | return; |
2567 | } |
2568 | |
2569 | if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { |
2570 | srso_mitigation = SRSO_MITIGATION_IBPB; |
2571 | goto out; |
2572 | } |
2573 | } else { |
2574 | pr_warn("IBPB-extending microcode not applied!\n" ); |
2575 | pr_warn(SRSO_NOTICE); |
2576 | |
2577 | /* may be overwritten by SRSO_CMD_SAFE_RET below */ |
2578 | srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; |
2579 | } |
2580 | |
2581 | switch (srso_cmd) { |
2582 | case SRSO_CMD_OFF: |
2583 | if (boot_cpu_has(X86_FEATURE_SBPB)) |
2584 | x86_pred_cmd = PRED_CMD_SBPB; |
2585 | return; |
2586 | |
2587 | case SRSO_CMD_MICROCODE: |
2588 | if (has_microcode) { |
2589 | srso_mitigation = SRSO_MITIGATION_MICROCODE; |
2590 | pr_warn(SRSO_NOTICE); |
2591 | } |
2592 | break; |
2593 | |
2594 | case SRSO_CMD_SAFE_RET: |
2595 | if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) { |
2596 | /* |
2597 | * Enable the return thunk for generated code |
2598 | * like ftrace, static_call, etc. |
2599 | */ |
2600 | setup_force_cpu_cap(X86_FEATURE_RETHUNK); |
2601 | setup_force_cpu_cap(X86_FEATURE_UNRET); |
2602 | |
2603 | if (boot_cpu_data.x86 == 0x19) { |
2604 | setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); |
2605 | x86_return_thunk = srso_alias_return_thunk; |
2606 | } else { |
2607 | setup_force_cpu_cap(X86_FEATURE_SRSO); |
2608 | x86_return_thunk = srso_return_thunk; |
2609 | } |
2610 | if (has_microcode) |
2611 | srso_mitigation = SRSO_MITIGATION_SAFE_RET; |
2612 | else |
2613 | srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; |
2614 | } else { |
2615 | pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n" ); |
2616 | } |
2617 | break; |
2618 | |
2619 | case SRSO_CMD_IBPB: |
2620 | if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { |
2621 | if (has_microcode) { |
2622 | setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); |
2623 | srso_mitigation = SRSO_MITIGATION_IBPB; |
2624 | } |
2625 | } else { |
2626 | pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n" ); |
2627 | } |
2628 | break; |
2629 | |
2630 | case SRSO_CMD_IBPB_ON_VMEXIT: |
2631 | if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) { |
2632 | if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { |
2633 | setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); |
2634 | srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; |
2635 | } |
2636 | } else { |
2637 | pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n" ); |
2638 | } |
2639 | break; |
2640 | } |
2641 | |
2642 | out: |
2643 | pr_info("%s\n" , srso_strings[srso_mitigation]); |
2644 | } |
2645 | |
2646 | #undef pr_fmt |
2647 | #define pr_fmt(fmt) fmt |
2648 | |
2649 | #ifdef CONFIG_SYSFS |
2650 | |
2651 | #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" |
2652 | |
2653 | #if IS_ENABLED(CONFIG_KVM_INTEL) |
2654 | static const char * const l1tf_vmx_states[] = { |
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
2661 | }; |
2662 | |
2663 | static ssize_t l1tf_show_state(char *buf) |
2664 | { |
2665 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) |
2666 | return sysfs_emit(buf, fmt: "%s\n" , L1TF_DEFAULT_MSG); |
2667 | |
2668 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || |
2669 | (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && |
2670 | sched_smt_active())) { |
2671 | return sysfs_emit(buf, fmt: "%s; VMX: %s\n" , L1TF_DEFAULT_MSG, |
2672 | l1tf_vmx_states[l1tf_vmx_mitigation]); |
2673 | } |
2674 | |
2675 | return sysfs_emit(buf, fmt: "%s; VMX: %s, SMT %s\n" , L1TF_DEFAULT_MSG, |
2676 | l1tf_vmx_states[l1tf_vmx_mitigation], |
2677 | sched_smt_active() ? "vulnerable" : "disabled" ); |
2678 | } |
2679 | |
2680 | static ssize_t itlb_multihit_show_state(char *buf) |
2681 | { |
2682 | if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || |
2683 | !boot_cpu_has(X86_FEATURE_VMX)) |
2684 | return sysfs_emit(buf, fmt: "KVM: Mitigation: VMX unsupported\n" ); |
2685 | else if (!(cr4_read_shadow() & X86_CR4_VMXE)) |
2686 | return sysfs_emit(buf, fmt: "KVM: Mitigation: VMX disabled\n" ); |
2687 | else if (itlb_multihit_kvm_mitigation) |
2688 | return sysfs_emit(buf, fmt: "KVM: Mitigation: Split huge pages\n" ); |
2689 | else |
2690 | return sysfs_emit(buf, fmt: "KVM: Vulnerable\n" ); |
2691 | } |
2692 | #else |
2693 | static ssize_t l1tf_show_state(char *buf) |
2694 | { |
2695 | return sysfs_emit(buf, "%s\n" , L1TF_DEFAULT_MSG); |
2696 | } |
2697 | |
2698 | static ssize_t itlb_multihit_show_state(char *buf) |
2699 | { |
2700 | return sysfs_emit(buf, "Processor vulnerable\n" ); |
2701 | } |
2702 | #endif |
2703 | |
2704 | static ssize_t mds_show_state(char *buf) |
2705 | { |
2706 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
2707 | return sysfs_emit(buf, fmt: "%s; SMT Host state unknown\n" , |
2708 | mds_strings[mds_mitigation]); |
2709 | } |
2710 | |
2711 | if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { |
2712 | return sysfs_emit(buf, fmt: "%s; SMT %s\n" , mds_strings[mds_mitigation], |
2713 | (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : |
2714 | sched_smt_active() ? "mitigated" : "disabled" )); |
2715 | } |
2716 | |
2717 | return sysfs_emit(buf, fmt: "%s; SMT %s\n" , mds_strings[mds_mitigation], |
2718 | sched_smt_active() ? "vulnerable" : "disabled" ); |
2719 | } |
2720 | |
2721 | static ssize_t tsx_async_abort_show_state(char *buf) |
2722 | { |
2723 | if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || |
2724 | (taa_mitigation == TAA_MITIGATION_OFF)) |
2725 | return sysfs_emit(buf, fmt: "%s\n" , taa_strings[taa_mitigation]); |
2726 | |
2727 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
2728 | return sysfs_emit(buf, fmt: "%s; SMT Host state unknown\n" , |
2729 | taa_strings[taa_mitigation]); |
2730 | } |
2731 | |
2732 | return sysfs_emit(buf, fmt: "%s; SMT %s\n" , taa_strings[taa_mitigation], |
2733 | sched_smt_active() ? "vulnerable" : "disabled" ); |
2734 | } |
2735 | |
2736 | static ssize_t mmio_stale_data_show_state(char *buf) |
2737 | { |
2738 | if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
2739 | return sysfs_emit(buf, fmt: "Unknown: No mitigations\n" ); |
2740 | |
2741 | if (mmio_mitigation == MMIO_MITIGATION_OFF) |
2742 | return sysfs_emit(buf, fmt: "%s\n" , mmio_strings[mmio_mitigation]); |
2743 | |
2744 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
2745 | return sysfs_emit(buf, fmt: "%s; SMT Host state unknown\n" , |
2746 | mmio_strings[mmio_mitigation]); |
2747 | } |
2748 | |
2749 | return sysfs_emit(buf, fmt: "%s; SMT %s\n" , mmio_strings[mmio_mitigation], |
2750 | sched_smt_active() ? "vulnerable" : "disabled" ); |
2751 | } |
2752 | |
2753 | static ssize_t rfds_show_state(char *buf) |
2754 | { |
	return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
2756 | } |
2757 | |
2758 | static char *stibp_state(void) |
2759 | { |
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return "; STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return "; STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return "; STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return "; STIBP: conditional";
	}
	return "";
2777 | } |
2778 | |
2779 | static char *ibpb_state(void) |
2780 | { |
2781 | if (boot_cpu_has(X86_FEATURE_IBPB)) { |
2782 | if (static_key_enabled(&switch_mm_always_ibpb)) |
2783 | return "; IBPB: always-on" ; |
2784 | if (static_key_enabled(&switch_mm_cond_ibpb)) |
2785 | return "; IBPB: conditional" ; |
2786 | return "; IBPB: disabled" ; |
2787 | } |
2788 | return "" ; |
2789 | } |
2790 | |
2791 | static char *pbrsb_eibrs_state(void) |
2792 | { |
2793 | if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { |
2794 | if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || |
2795 | boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) |
2796 | return "; PBRSB-eIBRS: SW sequence" ; |
2797 | else |
2798 | return "; PBRSB-eIBRS: Vulnerable" ; |
2799 | } else { |
2800 | return "; PBRSB-eIBRS: Not affected" ; |
2801 | } |
2802 | } |
2803 | |
2804 | static const char *spectre_bhi_state(void) |
2805 | { |
2806 | if (!boot_cpu_has_bug(X86_BUG_BHI)) |
2807 | return "; BHI: Not affected" ; |
2808 | else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) |
2809 | return "; BHI: BHI_DIS_S" ; |
2810 | else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) |
2811 | return "; BHI: SW loop, KVM: SW loop" ; |
2812 | else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && |
2813 | !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && |
2814 | rrsba_disabled) |
2815 | return "; BHI: Retpoline" ; |
2816 | else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT)) |
2817 | return "; BHI: Vulnerable, KVM: SW loop" ; |
2818 | |
2819 | return "; BHI: Vulnerable" ; |
2820 | } |
2821 | |
2822 | static ssize_t spectre_v2_show_state(char *buf) |
2823 | { |
2824 | if (spectre_v2_enabled == SPECTRE_V2_LFENCE) |
2825 | return sysfs_emit(buf, fmt: "Vulnerable: LFENCE\n" ); |
2826 | |
2827 | if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) |
2828 | return sysfs_emit(buf, fmt: "Vulnerable: eIBRS with unprivileged eBPF\n" ); |
2829 | |
2830 | if (sched_smt_active() && unprivileged_ebpf_enabled() && |
2831 | spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) |
2832 | return sysfs_emit(buf, fmt: "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n" ); |
2833 | |
2834 | return sysfs_emit(buf, fmt: "%s%s%s%s%s%s%s%s\n" , |
2835 | spectre_v2_strings[spectre_v2_enabled], |
2836 | ibpb_state(), |
2837 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "" , |
2838 | stibp_state(), |
2839 | boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "" , |
2840 | pbrsb_eibrs_state(), |
2841 | spectre_bhi_state(), |
2842 | /* this should always be at the end */ |
2843 | spectre_v2_module_string()); |
2844 | } |
2845 | |
2846 | static ssize_t srbds_show_state(char *buf) |
2847 | { |
	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
2849 | } |
2850 | |
2851 | static ssize_t retbleed_show_state(char *buf) |
2852 | { |
2853 | if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || |
2854 | retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { |
2855 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && |
2856 | boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) |
2857 | return sysfs_emit(buf, fmt: "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n" ); |
2858 | |
2859 | return sysfs_emit(buf, fmt: "%s; SMT %s\n" , retbleed_strings[retbleed_mitigation], |
2860 | !sched_smt_active() ? "disabled" : |
2861 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || |
2862 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? |
2863 | "enabled with STIBP protection" : "vulnerable" ); |
2864 | } |
2865 | |
2866 | return sysfs_emit(buf, fmt: "%s\n" , retbleed_strings[retbleed_mitigation]); |
2867 | } |
2868 | |
2869 | static ssize_t srso_show_state(char *buf) |
2870 | { |
	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
		return sysfs_emit(buf, "Mitigation: SMT disabled\n");

	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
2875 | } |
2876 | |
2877 | static ssize_t gds_show_state(char *buf) |
2878 | { |
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
2880 | } |
2881 | |
2882 | static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
2883 | char *buf, unsigned int bug) |
2884 | { |
2885 | if (!boot_cpu_has_bug(bug)) |
2886 | return sysfs_emit(buf, fmt: "Not affected\n" ); |
2887 | |
2888 | switch (bug) { |
2889 | case X86_BUG_CPU_MELTDOWN: |
2890 | if (boot_cpu_has(X86_FEATURE_PTI)) |
2891 | return sysfs_emit(buf, fmt: "Mitigation: PTI\n" ); |
2892 | |
2893 | if (hypervisor_is_type(type: X86_HYPER_XEN_PV)) |
2894 | return sysfs_emit(buf, fmt: "Unknown (XEN PV detected, hypervisor mitigation required)\n" ); |
2895 | |
2896 | break; |
2897 | |
2898 | case X86_BUG_SPECTRE_V1: |
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2900 | |
2901 | case X86_BUG_SPECTRE_V2: |
2902 | return spectre_v2_show_state(buf); |
2903 | |
2904 | case X86_BUG_SPEC_STORE_BYPASS: |
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
2906 | |
2907 | case X86_BUG_L1TF: |
2908 | if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) |
2909 | return l1tf_show_state(buf); |
2910 | break; |
2911 | |
2912 | case X86_BUG_MDS: |
2913 | return mds_show_state(buf); |
2914 | |
2915 | case X86_BUG_TAA: |
2916 | return tsx_async_abort_show_state(buf); |
2917 | |
2918 | case X86_BUG_ITLB_MULTIHIT: |
2919 | return itlb_multihit_show_state(buf); |
2920 | |
2921 | case X86_BUG_SRBDS: |
2922 | return srbds_show_state(buf); |
2923 | |
2924 | case X86_BUG_MMIO_STALE_DATA: |
2925 | case X86_BUG_MMIO_UNKNOWN: |
2926 | return mmio_stale_data_show_state(buf); |
2927 | |
2928 | case X86_BUG_RETBLEED: |
2929 | return retbleed_show_state(buf); |
2930 | |
2931 | case X86_BUG_SRSO: |
2932 | return srso_show_state(buf); |
2933 | |
2934 | case X86_BUG_GDS: |
2935 | return gds_show_state(buf); |
2936 | |
2937 | case X86_BUG_RFDS: |
2938 | return rfds_show_state(buf); |
2939 | |
2940 | default: |
2941 | break; |
2942 | } |
2943 | |
2944 | return sysfs_emit(buf, fmt: "Vulnerable\n" ); |
2945 | } |
2946 | |
2947 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) |
2948 | { |
2949 | return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); |
2950 | } |
2951 | |
2952 | ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) |
2953 | { |
2954 | return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); |
2955 | } |
2956 | |
2957 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) |
2958 | { |
2959 | return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); |
2960 | } |
2961 | |
2962 | ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) |
2963 | { |
2964 | return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); |
2965 | } |
2966 | |
2967 | ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) |
2968 | { |
2969 | return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); |
2970 | } |
2971 | |
2972 | ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) |
2973 | { |
2974 | return cpu_show_common(dev, attr, buf, X86_BUG_MDS); |
2975 | } |
2976 | |
2977 | ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) |
2978 | { |
2979 | return cpu_show_common(dev, attr, buf, X86_BUG_TAA); |
2980 | } |
2981 | |
2982 | ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) |
2983 | { |
2984 | return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); |
2985 | } |
2986 | |
2987 | ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) |
2988 | { |
2989 | return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); |
2990 | } |
2991 | |
2992 | ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) |
2993 | { |
2994 | if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
2995 | return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); |
2996 | else |
2997 | return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); |
2998 | } |
2999 | |
3000 | ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) |
3001 | { |
3002 | return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); |
3003 | } |
3004 | |
3005 | ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) |
3006 | { |
3007 | return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); |
3008 | } |
3009 | |
3010 | ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) |
3011 | { |
3012 | return cpu_show_common(dev, attr, buf, X86_BUG_GDS); |
3013 | } |
3014 | |
3015 | ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf) |
3016 | { |
3017 | return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); |
3018 | } |
3019 | #endif |
3020 | |
3021 | void __warn_thunk(void) |
3022 | { |
	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
3024 | } |
3025 | |