1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #include <linux/jump_label.h> |
3 | #include <asm/unwind_hints.h> |
4 | #include <asm/cpufeatures.h> |
5 | #include <asm/page_types.h> |
6 | #include <asm/percpu.h> |
7 | #include <asm/asm-offsets.h> |
8 | #include <asm/processor-flags.h> |
9 | #include <asm/ptrace-abi.h> |
10 | #include <asm/msr.h> |
11 | #include <asm/nospec-branch.h> |
12 | |
13 | /* |
14 | |
15 | x86 function call convention, 64-bit: |
16 | ------------------------------------- |
17 | arguments | callee-saved | extra caller-saved | return |
18 | [callee-clobbered] | | [callee-clobbered] | |
19 | --------------------------------------------------------------------------- |
20 | rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11 | rax, rdx [**] |
21 | |
( rsp is obviously invariant across normal function calls. (gcc can 'merge'
  functions when it sees tail-call optimization possibilities.) rflags is
  clobbered. Leftover arguments are passed on the stack. )
25 | |
26 | [*] In the frame-pointers case rbp is fixed to the stack frame. |
27 | |
[**] for struct return values wider than 64 bits the return convention is a
     bit more complex: structures up to 128 bits wide are returned
     directly in rax, rdx. For structures larger than that (3 words or
     more) the caller puts a pointer to an on-stack return struct
     [allocated in the caller's stack frame] into the first argument - i.e.
     into rdi. All other arguments shift up by one in this case.
     Fortunately this case is rare in the kernel.
35 | |
36 | For 32-bit we have the following conventions - kernel is built with |
37 | -mregparm=3 and -freg-struct-return: |
38 | |
39 | x86 function calling convention, 32-bit: |
40 | ---------------------------------------- |
41 | arguments | callee-saved | extra caller-saved | return |
42 | [callee-clobbered] | | [callee-clobbered] | |
43 | ------------------------------------------------------------------------- |
44 | eax edx ecx | ebx edi esi ebp [*] | <none> | eax, edx [**] |
45 | |
( here too esp is obviously invariant across normal function calls. eflags
  is clobbered. Leftover arguments are passed on the stack. )
48 | |
49 | [*] In the frame-pointers case ebp is fixed to the stack frame. |
50 | |
51 | [**] We build with -freg-struct-return, which on 32-bit means similar |
52 | semantics as on 64-bit: edx can be used for a second return value |
53 | (i.e. covering integer and structure sizes up to 64 bits) - after that |
54 | it gets more complex and more expensive: 3-word or larger struct returns |
55 | get done in the caller's frame and the pointer to the return struct goes |
56 | into regparm0, i.e. eax - the other arguments shift up and the |
57 | function's register parameters degenerate to regparm=2 in essence. |
58 | |
59 | */ |
60 | |
61 | #ifdef CONFIG_X86_64 |
62 | |
63 | /* |
64 | * 64-bit system call stack frame layout defines and helpers, |
65 | * for assembly code: |
66 | */ |
67 | |
68 | .macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0 |
69 | .if \save_ret |
70 | pushq %rsi /* pt_regs->si */ |
71 | movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ |
72 | movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ |
73 | .else |
74 | pushq %rdi /* pt_regs->di */ |
75 | pushq %rsi /* pt_regs->si */ |
76 | .endif |
77 | pushq \rdx /* pt_regs->dx */ |
78 | pushq \rcx /* pt_regs->cx */ |
79 | pushq \rax /* pt_regs->ax */ |
80 | pushq %r8 /* pt_regs->r8 */ |
81 | pushq %r9 /* pt_regs->r9 */ |
82 | pushq %r10 /* pt_regs->r10 */ |
83 | pushq %r11 /* pt_regs->r11 */ |
pushq %rbx /* pt_regs->bx */
pushq %rbp /* pt_regs->bp */
86 | pushq %r12 /* pt_regs->r12 */ |
87 | pushq %r13 /* pt_regs->r13 */ |
88 | pushq %r14 /* pt_regs->r14 */ |
89 | pushq %r15 /* pt_regs->r15 */ |
90 | UNWIND_HINT_REGS |
91 | |
92 | .if \save_ret |
93 | pushq %rsi /* return address on top of stack */ |
94 | .endif |
95 | .endm |
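
/*
 * Usage sketch for save_ret=1 (hypothetical helper, in the style of the
 * error-entry helpers): the macro runs in a function reached via CALL,
 * so a return address sits on top of the stack; afterwards that address
 * is back on top, with a full pt_regs frame built underneath it:
 *
 *	SYM_CODE_START_LOCAL(hypothetical_entry_helper)
 *		PUSH_REGS save_ret=1
 *		# ... fixups that need pt_regs ...
 *		RET
 *	SYM_CODE_END(hypothetical_entry_helper)
 */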
96 | |
97 | .macro CLEAR_REGS |
98 | /* |
99 | * Sanitize registers of values that a speculation attack might |
100 | * otherwise want to exploit. The lower registers are likely clobbered |
101 | * well before they could be put to use in a speculative execution |
102 | * gadget. |
103 | */ |
104 | xorl %esi, %esi /* nospec si */ |
105 | xorl %edx, %edx /* nospec dx */ |
106 | xorl %ecx, %ecx /* nospec cx */ |
107 | xorl %r8d, %r8d /* nospec r8 */ |
108 | xorl %r9d, %r9d /* nospec r9 */ |
109 | xorl %r10d, %r10d /* nospec r10 */ |
110 | xorl %r11d, %r11d /* nospec r11 */ |
111 | xorl %ebx, %ebx /* nospec rbx */ |
112 | xorl %ebp, %ebp /* nospec rbp */ |
113 | xorl %r12d, %r12d /* nospec r12 */ |
114 | xorl %r13d, %r13d /* nospec r13 */ |
115 | xorl %r14d, %r14d /* nospec r14 */ |
xorl %r15d, %r15d /* nospec r15 */
.endm
119 | |
120 | .macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0 |
121 | PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret |
122 | CLEAR_REGS |
123 | .endm |
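
/*
 * Typical use (illustrative; the handler name is hypothetical):
 * exception entry builds pt_regs and sanitizes the registers in one go
 * before calling into C:
 *
 *	PUSH_AND_CLEAR_REGS
 *	movq	%rsp, %rdi		# struct pt_regs * as first C argument
 *	call	hypothetical_c_handler
 */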
124 | |
125 | .macro POP_REGS pop_rdi=1 |
126 | popq %r15 |
127 | popq %r14 |
128 | popq %r13 |
129 | popq %r12 |
130 | popq %rbp |
131 | popq %rbx |
132 | popq %r11 |
133 | popq %r10 |
134 | popq %r9 |
135 | popq %r8 |
136 | popq %rax |
137 | popq %rcx |
138 | popq %rdx |
139 | popq %rsi |
140 | .if \pop_rdi |
141 | popq %rdi |
142 | .endif |
143 | .endm |
144 | |
145 | #ifdef CONFIG_PAGE_TABLE_ISOLATION |
146 | |
147 | /* |
148 | * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two |
149 | * halves: |
150 | */ |
151 | #define PTI_USER_PGTABLE_BIT PAGE_SHIFT |
152 | #define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT) |
153 | #define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT |
154 | #define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT) |
155 | #define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK) |
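
/*
 * Worked example with made-up numbers: an 8k-aligned PGD at physical
 * address 0x7fffe000, used with kernel PCID 1, gives:
 *
 *	kernel CR3:	0x7fffe001
 *	user CR3:	0x7fffe001 | PTI_USER_PGTABLE_MASK	(bit 12)
 *			           | PTI_USER_PCID_MASK		(bit 11)
 *		      =	0x7ffff801
 *
 * The macros below do little more than set or clear these two bits
 * (plus, with PCID, the NOFLUSH bit).
 */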
156 | |
157 | .macro SET_NOFLUSH_BIT reg:req |
158 | bts $X86_CR3_PCID_NOFLUSH_BIT, \reg |
159 | .endm |
160 | |
161 | .macro ADJUST_KERNEL_CR3 reg:req |
ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
163 | /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */ |
164 | andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg |
165 | .endm |
166 | |
167 | .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req |
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
169 | mov %cr3, \scratch_reg |
170 | ADJUST_KERNEL_CR3 \scratch_reg |
171 | mov \scratch_reg, %cr3 |
172 | .Lend_\@: |
173 | .endm |
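
/*
 * Usage sketch (hypothetical entry path): switch to kernel GS first,
 * then to the kernel page tables, using any register whose user value
 * has already been saved:
 *
 *	swapgs
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *	# ... kernel page tables are now live ...
 */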
174 | |
175 | #define THIS_CPU_user_pcid_flush_mask \ |
176 | PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask |
177 | |
178 | .macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req |
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
180 | mov %cr3, \scratch_reg |
181 | |
ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
183 | |
184 | /* |
185 | * Test if the ASID needs a flush. |
186 | */ |
187 | movq \scratch_reg, \scratch_reg2 |
188 | andq $(0x7FF), \scratch_reg /* mask ASID */ |
189 | bt \scratch_reg, THIS_CPU_user_pcid_flush_mask |
190 | jnc .Lnoflush_\@ |
191 | |
192 | /* Flush needed, clear the bit */ |
193 | btr \scratch_reg, THIS_CPU_user_pcid_flush_mask |
194 | movq \scratch_reg2, \scratch_reg |
195 | jmp .Lwrcr3_pcid_\@ |
196 | |
197 | .Lnoflush_\@: |
198 | movq \scratch_reg2, \scratch_reg |
199 | SET_NOFLUSH_BIT \scratch_reg |
200 | |
201 | .Lwrcr3_pcid_\@: |
202 | /* Flip the ASID to the user version */ |
203 | orq $(PTI_USER_PCID_MASK), \scratch_reg |
204 | |
205 | .Lwrcr3_\@: |
206 | /* Flip the PGD to the user version */ |
207 | orq $(PTI_USER_PGTABLE_MASK), \scratch_reg |
208 | mov \scratch_reg, %cr3 |
209 | .Lend_\@: |
210 | .endm |
211 | |
212 | .macro SWITCH_TO_USER_CR3_STACK scratch_reg:req |
213 | pushq %rax |
214 | SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax |
215 | popq %rax |
216 | .endm |
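
/*
 * Exit-path sketch (illustrative): restore the user page tables as one
 * of the last steps before returning, with %rax borrowed internally as
 * the second scratch register:
 *
 *	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 *	# ... restore the user's %rdi ...
 *	swapgs
 *	iretq
 */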
217 | |
218 | .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req |
ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
220 | movq %cr3, \scratch_reg |
221 | movq \scratch_reg, \save_reg |
222 | /* |
223 | * Test the user pagetable bit. If set, then the user page tables |
224 | * are active. If clear CR3 already has the kernel page table |
225 | * active. |
226 | */ |
227 | bt $PTI_USER_PGTABLE_BIT, \scratch_reg |
228 | jnc .Ldone_\@ |
229 | |
230 | ADJUST_KERNEL_CR3 \scratch_reg |
231 | movq \scratch_reg, %cr3 |
232 | |
233 | .Ldone_\@: |
234 | .endm |
235 | |
236 | .macro RESTORE_CR3 scratch_reg:req save_reg:req |
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
240 | |
241 | /* |
242 | * KERNEL pages can always resume with NOFLUSH as we do |
243 | * explicit flushes. |
244 | */ |
245 | bt $PTI_USER_PGTABLE_BIT, \save_reg |
246 | jnc .Lnoflush_\@ |
247 | |
248 | /* |
249 | * Check if there's a pending flush for the user ASID we're |
250 | * about to set. |
251 | */ |
252 | movq \save_reg, \scratch_reg |
andq $(0x7FF), \scratch_reg /* mask ASID */
254 | bt \scratch_reg, THIS_CPU_user_pcid_flush_mask |
255 | jnc .Lnoflush_\@ |
256 | |
257 | btr \scratch_reg, THIS_CPU_user_pcid_flush_mask |
258 | jmp .Lwrcr3_\@ |
259 | |
260 | .Lnoflush_\@: |
261 | SET_NOFLUSH_BIT \save_reg |
262 | |
263 | .Lwrcr3_\@: |
264 | /* |
265 | * The CR3 write could be avoided when not changing its value, |
266 | * but would require a CR3 read *and* a scratch register. |
267 | */ |
268 | movq \save_reg, %cr3 |
269 | .Lend_\@: |
270 | .endm |
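
/*
 * Pairing sketch (modeled on the paranoid entry/exit paths): the CR3
 * value captured on entry is written back verbatim on exit, whichever
 * PGD half it pointed at:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	# ... handle the NMI/exception ...
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */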
271 | |
272 | #else /* CONFIG_PAGE_TABLE_ISOLATION=n: */ |
273 | |
274 | .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req |
275 | .endm |
276 | .macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req |
277 | .endm |
278 | .macro SWITCH_TO_USER_CR3_STACK scratch_reg:req |
279 | .endm |
280 | .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req |
281 | .endm |
282 | .macro RESTORE_CR3 scratch_reg:req save_reg:req |
283 | .endm |
284 | |
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
286 | |
287 | /* |
288 | * IBRS kernel mitigation for Spectre_v2. |
289 | * |
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction. (NOTE! UNTRAIN_RET includes a RET instruction.)
 *
 * The optional argument is used to save/restore the current value of the
 * SPEC_CTRL MSR, which is needed on the paranoid paths.
 *
 * Assumes that x86_spec_ctrl_{base,current} have SPEC_CTRL_IBRS set.
298 | */ |
299 | .macro IBRS_ENTER save_reg |
300 | #ifdef CONFIG_CPU_IBRS_ENTRY |
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
302 | movl $MSR_IA32_SPEC_CTRL, %ecx |
303 | |
304 | .ifnb \save_reg |
305 | rdmsr |
306 | shl $32, %rdx |
307 | or %rdx, %rax |
308 | mov %rax, \save_reg |
309 | test $SPEC_CTRL_IBRS, %eax |
310 | jz .Ldo_wrmsr_\@ |
311 | lfence |
312 | jmp .Lend_\@ |
313 | .Ldo_wrmsr_\@: |
314 | .endif |
315 | |
316 | movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx |
317 | movl %edx, %eax |
318 | shr $32, %rdx |
319 | wrmsr |
320 | .Lend_\@: |
321 | #endif |
322 | .endm |
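
/*
 * Usage sketch: a normal entry from userspace enables IBRS
 * unconditionally, while a paranoid path (which can interrupt kernel
 * code that already has IBRS set) preserves the old MSR value so that
 * IBRS_EXIT can restore it:
 *
 *	IBRS_ENTER			# plain entry from userspace
 *	IBRS_ENTER save_reg=%r15	# paranoid entry
 *	...
 *	IBRS_EXIT save_reg=%r15		# paranoid exit
 */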
323 | |
324 | /* |
 * Similar to IBRS_ENTER, requires KERNEL GS, CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
327 | */ |
328 | .macro IBRS_EXIT save_reg |
329 | #ifdef CONFIG_CPU_IBRS_ENTRY |
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
331 | movl $MSR_IA32_SPEC_CTRL, %ecx |
332 | |
333 | .ifnb \save_reg |
334 | mov \save_reg, %rdx |
335 | .else |
336 | movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx |
337 | andl $(~SPEC_CTRL_IBRS), %edx |
338 | .endif |
339 | |
340 | movl %edx, %eax |
341 | shr $32, %rdx |
342 | wrmsr |
343 | .Lend_\@: |
344 | #endif |
345 | .endm |
346 | |
347 | /* |
348 | * Mitigate Spectre v1 for conditional swapgs code paths. |
349 | * |
350 | * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to |
351 | * prevent a speculative swapgs when coming from kernel space. |
352 | * |
353 | * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path, |
354 | * to prevent the swapgs from getting speculatively skipped when coming from |
355 | * user space. |
356 | */ |
357 | .macro FENCE_SWAPGS_USER_ENTRY |
ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
359 | .endm |
360 | .macro FENCE_SWAPGS_KERNEL_ENTRY |
ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
362 | .endm |
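
/*
 * Usage sketch for a conditional-swapgs entry path (the stack offset
 * and labels are hypothetical):
 *
 *	testb	$3, CS+8(%rsp)		# coming from userspace?
 *	jz	.Lfrom_kernel
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY		# fence the swapgs we just did
 *	jmp	.Lcommon
 * .Lfrom_kernel:
 *	FENCE_SWAPGS_KERNEL_ENTRY	# fence the swapgs we (correctly) skipped
 * .Lcommon:
 */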
363 | |
364 | .macro STACKLEAK_ERASE_NOCLOBBER |
365 | #ifdef CONFIG_GCC_PLUGIN_STACKLEAK |
366 | PUSH_AND_CLEAR_REGS |
367 | call stackleak_erase |
368 | POP_REGS |
369 | #endif |
370 | .endm |
371 | |
372 | .macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req |
373 | rdgsbase \save_reg |
374 | GET_PERCPU_BASE \scratch_reg |
375 | wrgsbase \scratch_reg |
376 | .endm |
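
/*
 * Usage sketch (FSGSBASE paranoid-entry style): stash the interrupted
 * context's GSBASE, install this CPU's per-CPU base, and undo it with
 * wrgsbase on the way out:
 *
 *	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *	# ... code that relies on %gs-based per-CPU accesses ...
 *	wrgsbase %rbx			# restore the previous GSBASE
 */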
377 | |
378 | #else /* CONFIG_X86_64 */ |
379 | # undef UNWIND_HINT_IRET_REGS |
380 | # define UNWIND_HINT_IRET_REGS |
381 | #endif /* !CONFIG_X86_64 */ |
382 | |
383 | .macro STACKLEAK_ERASE |
384 | #ifdef CONFIG_GCC_PLUGIN_STACKLEAK |
385 | call stackleak_erase |
386 | #endif |
387 | .endm |
388 | |
389 | #ifdef CONFIG_SMP |
390 | |
391 | /* |
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in the GDT.
394 | */ |
395 | .macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req |
396 | movq $__CPUNODE_SEG, \reg |
397 | lsl \reg, \reg |
398 | .endm |
399 | |
400 | /* |
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously cannot use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads the guest's TSC_AUX on VM-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
409 | */ |
410 | .macro GET_PERCPU_BASE reg:req |
411 | LOAD_CPU_AND_NODE_SEG_LIMIT \reg |
412 | andq $VDSO_CPUNODE_MASK, \reg |
413 | movq __per_cpu_offset(, \reg, 8), \reg |
414 | .endm |
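
/*
 * Worked example (illustrative numbers): for CPU 5 on node 1 the
 * __CPUNODE_SEG descriptor's limit is (1 << VDSO_CPUNODE_BITS) | 5 =
 * 0x1005. LSL loads that limit, the AND keeps the CPU number (5), and
 * the final MOV fetches __per_cpu_offset[5], the base of CPU 5's
 * per-CPU area.
 */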
415 | |
416 | #else |
417 | |
418 | .macro GET_PERCPU_BASE reg:req |
419 | movq pcpu_unit_offsets(%rip), \reg |
420 | .endm |
421 | |
422 | #endif /* CONFIG_SMP */ |
423 | |