/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/nospec-branch.h>

	.text
	.code64
SYM_FUNC_START(sme_encrypt_execute)

	/*
	 * Entry parameters:
	 *   RDI - virtual address for the encrypted mapping
	 *   RSI - virtual address for the decrypted mapping
	 *   RDX - length to encrypt
	 *   RCX - virtual address of the encryption workarea, including:
	 *     - stack page (PAGE_SIZE)
	 *     - encryption routine page (PAGE_SIZE)
	 *     - intermediate copy buffer (PMD_SIZE)
	 *    R8 - physical address of the pagetables to use for encryption
	 */
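	/*
	 * The workarea and the pagetables are prepared by the caller,
	 * sme_encrypt_kernel(), before control reaches this point.
	 */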

	push	%rbp
	movq	%rsp, %rbp		/* RBP now has original stack pointer */

	/* Set up a one page stack in the non-encrypted memory area */
	movq	%rcx, %rax		/* Workarea stack page */
	leaq	PAGE_SIZE(%rax), %rsp	/* Set new stack pointer */
	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */
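	/*
	 * RSP now points to the top of the workarea stack page and RAX to
	 * the page after it, where __enc_copy is copied below.
	 */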

	push	%r12
	movq	%rdi, %r10		/* Encrypted area */
	movq	%rsi, %r11		/* Decrypted area */
	movq	%rdx, %r12		/* Area length */

	/* Copy encryption routine into the workarea */
	movq	%rax, %rdi				/* Workarea encryption routine */
	leaq	__enc_copy(%rip), %rsi			/* Encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* Encryption routine length */
	rep	movsb

	/* Setup registers for call */
	movq	%r10, %rdi		/* Encrypted area */
	movq	%r11, %rsi		/* Decrypted area */
	movq	%r8, %rdx		/* Pagetables used for encryption */
	movq	%r12, %rcx		/* Area length */
	movq	%rax, %r8		/* Workarea encryption routine */
	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */
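	/*
	 * RDI, RSI, RDX, RCX and R8 now match the entry parameters
	 * documented for __enc_copy below.
	 */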

	ANNOTATE_RETPOLINE_SAFE
	call	*%rax			/* Call the encryption routine */
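	/*
	 * ANNOTATE_RETPOLINE_SAFE marks the indirect call above as
	 * intentionally made without a retpoline so objtool does not warn;
	 * the target is the copy of __enc_copy written into the workarea
	 * just above.
	 */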

	pop	%r12

	movq	%rbp, %rsp		/* Restore original stack pointer */
	pop	%rbp

	/* Offset to __x86_return_thunk would be wrong here */
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(sme_encrypt_execute)

SYM_FUNC_START(__enc_copy)
/*
 * Routine used to encrypt memory in place.
 *   This routine must be run outside of the kernel proper since
 *   the kernel will be encrypted during the process. So this
 *   routine is defined here and then copied to an area outside
 *   of the kernel where it will remain and run decrypted
 *   during execution.
 *
 *   On entry the registers must be:
 *     RDI - virtual address for the encrypted mapping
 *     RSI - virtual address for the decrypted mapping
 *     RDX - address of the pagetables to use for encryption
 *     RCX - length of area
 *      R8 - intermediate copy buffer
 *
 *     RAX - points to this routine
 *
 * The area will be encrypted by copying from the non-encrypted
 * memory space to an intermediate buffer and then copying from the
 * intermediate buffer back to the encrypted memory space. The physical
 * addresses of the two mappings are the same which results in the area
 * being encrypted "in place".
 */
	/* Enable the new page tables */
	mov	%rdx, %cr3
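	/*
	 * Writing CR3 switches to the caller-provided pagetables and also
	 * flushes the non-global TLB entries; the global entries are
	 * flushed next by toggling CR4.PGE.
	 */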

	/* Flush any global TLBs */
	mov	%cr4, %rdx
	andq	$~X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
	orq	$X86_CR4_PGE, %rdx
	mov	%rdx, %cr4

	push	%r15
	push	%r12

	movq	%rcx, %r9		/* Save area length */
	movq	%rdi, %r10		/* Save encrypted area address */
	movq	%rsi, %r11		/* Save decrypted area address */

	/* Set the PAT register PA5 entry to write-protect */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%rdx, %r15		/* Save original PAT value */
	andl	$0xffff00ff, %edx	/* Clear PA5 */
	orl	$0x00000500, %edx	/* Set PA5 to WP */
	wrmsr
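	/*
	 * EDX holds bits 63:32 of the PAT MSR, so PA5 (MSR bits 47:40) is
	 * EDX bits 15:8: the 0xffff00ff mask clears it and 0x0500 sets
	 * memory type 0x05, i.e. write-protect.
	 */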

	wbinvd				/* Invalidate any cache entries */
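	/*
	 * WBINVD writes back and invalidates all caches, presumably so
	 * that no cachelines populated through the old mappings alias the
	 * copies made below.
	 */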

	/* Copy/encrypt up to 2MB at a time */
	movq	$PMD_SIZE, %r12
1:
	cmpq	%r12, %r9
	jnb	2f
	movq	%r9, %r12
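	/* R12 = bytes to copy this pass: PMD_SIZE, or the remainder if less */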

2:
	movq	%r11, %rsi		/* Source - decrypted area */
	movq	%r8, %rdi		/* Dest - intermediate copy buffer */
	movq	%r12, %rcx
	rep	movsb

	movq	%r8, %rsi		/* Source - intermediate copy buffer */
	movq	%r10, %rdi		/* Dest - encrypted area */
	movq	%r12, %rcx
	rep	movsb

	addq	%r12, %r11
	addq	%r12, %r10
	subq	%r12, %r9		/* Kernel length decrement */
	jnz	1b			/* Kernel length not zero? */

	/* Restore PAT register */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
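	/*
	 * The rdmsr re-reads the current PAT so EAX holds the unmodified
	 * low 32 bits; only the high half (saved in R15) was changed above.
	 */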
	mov	%r15, %rdx		/* Restore original PAT value */
	wrmsr

	pop	%r12
	pop	%r15

	/* Offset to __x86_return_thunk would be wrong here */
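	/*
	 * __enc_copy runs from its copy in the workarea, so a RIP-relative
	 * jump to the return thunk would resolve to the wrong address; a
	 * bare RET is used instead, with INT3 as a speculation trap behind
	 * it.
	 */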
	ANNOTATE_UNRET_SAFE
	ret
	int3
.L__enc_copy_end:
SYM_FUNC_END(__enc_copy)