/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/asm-compat.h>

#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef CONFIG_PPC64_ELF_ABI_V2
#define FUNC(name)		name
#else
#define FUNC(name)		GLUE(.,name)
#endif
#define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)		name
#define GET_SHADOW_VCPU(reg)	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#endif /* CONFIG_PPC_BOOK3S_64 */
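/*
 * On 64-bit Book3S the shadow vcpu is embedded in the PACA, so its
 * address is just an offset from r13 (the PACA pointer); on 32-bit it
 * is a pointer held in the thread struct, reached through r2 (current).
 */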
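/*
 * The macro below loads only the C-ABI non-volatile GPRs (r14-r31):
 * the guest's volatile registers live in the shadow vcpu and are set
 * up by the real-mode entry code instead.
 */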
#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(R31)(vcpu)

/*****************************************************************************
 *                                                                           *
 *    Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 * r3: vcpu pointer
 */
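/*
 * In C terms this entry point behaves roughly like (a sketch; the
 * authoritative prototype is the declaration in the kernel headers):
 *
 *	int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * It runs the guest until there is an exit that the assembly/real-mode
 * code cannot handle itself, then returns the value that
 * kvmppc_handle_exit_pr left in r3.
 */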
_GLOBAL(__kvmppc_vcpu_run)

kvm_start_entry:
	/* Save LR into the caller's stack frame, as the ABI requires */
	mflr	r0
	PPC_STL	r0, PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (vcpu) */
	SAVE_GPR(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save CR */
	mfcr	r14
	stw	r14, _CCR(r1)

	/* Save LR */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r3)

kvm_start_lightweight:
	/* Copy registers into shadow vcpu so we can access them in real mode */
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
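	/*
	 * The nop after the bl is the slot a PPC64 ELF linker may patch
	 * with a TOC restore for cross-module calls; it stays a plain
	 * nop here and is harmless on 32-bit builds.
	 */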
	REST_GPR(3, r1)

#ifdef CONFIG_PPC_BOOK3S_64
	/* Get the dcbz32 flag */
	PPC_LL	r0, VCPU_HFLAGS(r3)
	rldicl	r0, r0, 0, 63		/* r0 &= 1 */
	stb	r0, HSTATE_RESTORE_HID5(r13)
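	/*
	 * The low hflags bit is the dcbz32 flag: the guest expects dcbz
	 * to clear 32-byte cache lines. It is stashed in the PACA so the
	 * real-mode code can adjust HID5 (a PPC970 facility) accordingly.
	 */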

	/* Load up guest SPRG3 value, since it's user readable */
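	/*
	 * The shared page is kept in the guest's endianness; when that
	 * differs from the host's, ldbrx byte-reverses the 8-byte load.
	 */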
	lbz	r4, VCPU_SHAREDBE(r3)
	cmpwi	r4, 0
	ld	r5, VCPU_SHARED(r3)
	beq	sprg3_little_endian
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r4, 0, r5
#endif
	b	after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r4, 0, r5
#endif

after_sprg3_load:
	mtspr	SPRN_SPRG3, r4
#endif /* CONFIG_PPC_BOOK3S_64 */

	PPC_LL	r4, VCPU_SHADOW_MSR(r3)	/* get shadow_msr */

	/* Jump to segment patching handler and into our guest */
	bl	FUNC(kvmppc_entry_trampoline)
	nop
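	/*
	 * kvmppc_entry_trampoline hands control to the real-mode entry
	 * code, which loads the remaining guest state from the shadow
	 * vcpu and enters the guest; on a guest exit, the lowmem
	 * trampoline jumps back to the handler below.
	 */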

/*
 * This is the handler in module memory. It is jumped to from the
 * lowmem trampoline code, so it is effectively the guest exit path.
 */

/*
 * Register usage at this point:
 *
 * R1       = host R1
 * R2       = host R2
 * R12      = exit handler id
 * R13      = PACA
 * SVCPU.*  = guest *
 * MSR.EE   = 1
 */

	PPC_LL	r3, GPR3(r1)		/* vcpu pointer */

	/*
	 * kvmppc_copy_from_svcpu can clobber volatile registers, so save
	 * the exit handler id to the vcpu and restore it from there later.
	 */
	stw	r12, VCPU_TRAP(r3)

	/* Transfer reg values from shadow vcpu back to vcpu struct */

	bl	FUNC(kvmppc_copy_from_svcpu)
	nop

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Reload the kernel's SPRG3 value.
	 * No need to save the guest value, as usermode can't modify SPRG3.
	 */
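	/*
	 * On 64-bit hosts SPRG3 carries the per-cpu data that the VDSO
	 * (e.g. getcpu) reads from userspace, so the host value has to
	 * be back in place before returning towards userspace.
	 */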
	ld	r3, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE, r3
#endif /* CONFIG_PPC_BOOK3S_64 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR3(r1)
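	/*
	 * r14-r31 still hold the guest's values: the intervening C calls
	 * preserve non-volatile registers per the ABI, so they can be
	 * written back to the vcpu struct only now.
	 */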
	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
	PPC_STL	r16, VCPU_GPR(R16)(r7)
	PPC_STL	r17, VCPU_GPR(R17)(r7)
	PPC_STL	r18, VCPU_GPR(R18)(r7)
	PPC_STL	r19, VCPU_GPR(R19)(r7)
	PPC_STL	r20, VCPU_GPR(R20)(r7)
	PPC_STL	r21, VCPU_GPR(R21)(r7)
	PPC_STL	r22, VCPU_GPR(R22)(r7)
	PPC_STL	r23, VCPU_GPR(R23)(r7)
	PPC_STL	r24, VCPU_GPR(R24)(r7)
	PPC_STL	r25, VCPU_GPR(R25)(r7)
	PPC_STL	r26, VCPU_GPR(R26)(r7)
	PPC_STL	r27, VCPU_GPR(R27)(r7)
	PPC_STL	r28, VCPU_GPR(R28)(r7)
	PPC_STL	r29, VCPU_GPR(R29)(r7)
	PPC_STL	r30, VCPU_GPR(R30)(r7)
	PPC_STL	r31, VCPU_GPR(R31)(r7)

	/* Pass the exit number as 2nd argument to kvmppc_handle_exit_pr */
	lwz	r4, VCPU_TRAP(r7)

	/* Restore r3 (vcpu) */
	REST_GPR(3, r1)
	bl	FUNC(kvmppc_handle_exit_pr)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight
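/*
 * kvmppc_handle_exit_pr returns a RESUME_* code in r3; in C terms it is
 * roughly "int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int
 * exit_nr)". RESUME_GUEST re-enters without touching r14-r31, which
 * still hold the guest's values; RESUME_GUEST_NV reloads them from the
 * vcpu because the handler may have changed them; anything else falls
 * through and returns to the caller.
 */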

kvm_exit_loop:

	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	lwz	r14, _CCR(r1)
	mtcr	r14

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr

kvm_loop_heavyweight:

	PPC_LL	r4, _LINK(r1)
	PPC_STL	r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
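	/*
	 * This re-stores the saved host LR into the LR save slot of the
	 * caller's frame (the switch frame is still on the stack, hence
	 * the SWITCH_FRAME_SIZE offset).
	 */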

	/* Load vcpu */
	REST_GPR(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r3)

	/* Jump back to the lightweight guest entry */
	b	kvm_start_lightweight

kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(3, r1)

	/* Jump back to the lightweight guest entry */
	b	kvm_start_lightweight