1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved. |
4 | * |
5 | * Author: Varun Sethi, <varun.sethi@freescale.com> |
6 | * |
7 | * Description: |
8 | * This file is derived from arch/powerpc/kvm/e500.c, |
9 | * by Yu Liu <yu.liu@freescale.com>. |
10 | */ |
11 | |
12 | #include <linux/kvm_host.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/err.h> |
15 | #include <linux/export.h> |
16 | #include <linux/miscdevice.h> |
17 | #include <linux/module.h> |
18 | |
19 | #include <asm/reg.h> |
20 | #include <asm/cputable.h> |
21 | #include <asm/kvm_ppc.h> |
22 | #include <asm/dbell.h> |
23 | #include <asm/ppc-opcode.h> |
24 | |
25 | #include "booke.h" |
26 | #include "e500.h" |
27 | |
28 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type) |
29 | { |
30 | enum ppc_dbell dbell_type; |
31 | unsigned long tag; |
32 | |
33 | switch (type) { |
34 | case INT_CLASS_NONCRIT: |
35 | dbell_type = PPC_G_DBELL; |
36 | break; |
37 | case INT_CLASS_CRIT: |
38 | dbell_type = PPC_G_DBELL_CRIT; |
39 | break; |
40 | case INT_CLASS_MC: |
41 | dbell_type = PPC_G_DBELL_MC; |
42 | break; |
43 | default: |
44 | WARN_ONCE(1, "%s: unknown int type %d\n" , __func__, type); |
45 | return; |
46 | } |
47 | |
48 | preempt_disable(); |
49 | tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id; |
50 | mb(); |
51 | ppc_msgsnd(dbell_type, 0, tag); |
52 | preempt_enable(); |
53 | } |
54 | |
55 | /* gtlbe must not be mapped by more than one host tlb entry */ |
56 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, |
57 | struct kvm_book3e_206_tlb_entry *gtlbe) |
58 | { |
59 | unsigned int tid, ts; |
60 | gva_t eaddr; |
61 | u32 val; |
62 | unsigned long flags; |
63 | |
64 | ts = get_tlb_ts(tlbe: gtlbe); |
65 | tid = get_tlb_tid(tlbe: gtlbe); |
66 | |
67 | /* We search the host TLB to invalidate its shadow TLB entry */ |
68 | val = (tid << 16) | ts; |
69 | eaddr = get_tlb_eaddr(tlbe: gtlbe); |
70 | |
71 | local_irq_save(flags); |
72 | |
73 | mtspr(SPRN_MAS6, val); |
74 | mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu)); |
75 | |
76 | asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr)); |
77 | val = mfspr(SPRN_MAS1); |
78 | if (val & MAS1_VALID) { |
79 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); |
80 | asm volatile("tlbwe" ); |
81 | } |
82 | mtspr(SPRN_MAS5, 0); |
83 | /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */ |
84 | mtspr(SPRN_MAS8, 0); |
85 | isync(); |
86 | |
87 | local_irq_restore(flags); |
88 | } |
89 | |
/*
 * Invalidate all host shadow TLB entries for this vcpu's guest lpid.
 * MAS5 is pointed at the guest space (SGS) with the vcpu's lpid before
 * issuing tlbilxlpid, then cleared again; interrupts are off so nothing
 * can modify MAS5 between the mtspr and the tlbilx.
 */
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
	/*
	 * clang-17 and older could not assemble tlbilxlpid.
	 * https://github.com/ClangBuiltLinux/linux/issues/1891
	 */
	asm volatile (PPC_TLBILX_LPID);
	mtspr(SPRN_MAS5, 0);
	local_irq_restore(flags);
}
104 | |
/* Record the guest's PID in the vcpu state. */
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	vcpu->arch.pid = pid;
}
109 | |
/* No MMU work is needed on guest MSR changes for this core; stub. */
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
113 | |
114 | /* We use two lpids per VM */ |
115 | static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid); |
116 | |
/*
 * Load this vcpu's state onto the current physical cpu: lpid and
 * hypervisor configuration first, then the guest SPR images (GIVPR,
 * GIVORs, GSPRGs, GSRRs, GEPR, GDEAR, GESR) that the hardware exposes
 * to the guest directly.
 */
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_booke_vcpu_load(vcpu, cpu);

	/* Switch the hardware to this guest's lpid. */
	mtspr(SPRN_LPID, get_lpid(vcpu));
	mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
	mtspr(SPRN_GPIR, vcpu->vcpu_id);
	mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
	/* EPLC/EPSC both carry EGS plus this vcpu's lpid. */
	vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
	vcpu->arch.epsc = vcpu->arch.eplc;
	mtspr(SPRN_EPLC, vcpu->arch.eplc);
	mtspr(SPRN_EPSC, vcpu->arch.epsc);

	mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
	mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
	mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
	mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
	mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
	mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
	mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);

	mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
	mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);

	mtspr(SPRN_GEPR, vcpu->arch.epr);
	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
	mtspr(SPRN_GESR, vcpu->arch.shared->esr);

	/*
	 * If this vcpu last ran on a different physical cpu, or this cpu
	 * last ran a different vcpu under the same lpid, the shadow TLB
	 * entries tagged with this lpid may be stale — flush them all.
	 */
	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
	    __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
		kvmppc_e500_tlbil_all(vcpu_e500);
		__this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
	}
}
153 | |
/*
 * Mirror of vcpu_load: read the live guest SPRs back into the vcpu
 * state, and remember which physical cpu (PIR) we last ran on so
 * vcpu_load can decide whether a shadow TLB flush is needed.
 */
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.eplc = mfspr(SPRN_EPLC);
	vcpu->arch.epsc = mfspr(SPRN_EPSC);

	vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
	vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
	vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
	vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);

	vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
	vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);

	vcpu->arch.epr = mfspr(SPRN_GEPR);
	vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
	vcpu->arch.shared->esr = mfspr(SPRN_GESR);

	vcpu->arch.oldpir = mfspr(SPRN_PIR);

	kvmppc_booke_vcpu_put(vcpu);
}
175 | |
176 | static int kvmppc_e500mc_check_processor_compat(void) |
177 | { |
178 | int r; |
179 | |
180 | if (strcmp(cur_cpu_spec->cpu_name, "e500mc" ) == 0) |
181 | r = 0; |
182 | else if (strcmp(cur_cpu_spec->cpu_name, "e5500" ) == 0) |
183 | r = 0; |
184 | #ifdef CONFIG_ALTIVEC |
185 | /* |
186 | * Since guests have the privilege to enable AltiVec, we need AltiVec |
187 | * support in the host to save/restore their context. |
188 | * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit |
189 | * because it's cleared in the absence of CONFIG_ALTIVEC! |
190 | */ |
191 | else if (strcmp(cur_cpu_spec->cpu_name, "e6500" ) == 0) |
192 | r = 0; |
193 | #endif |
194 | else |
195 | r = -ENOTSUPP; |
196 | |
197 | return r; |
198 | } |
199 | |
200 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) |
201 | { |
202 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
203 | |
204 | vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \ |
205 | SPRN_EPCR_DUVD; |
206 | #ifdef CONFIG_64BIT |
207 | vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM; |
208 | #endif |
209 | vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP; |
210 | |
211 | vcpu->arch.pvr = mfspr(SPRN_PVR); |
212 | vcpu_e500->svr = mfspr(SPRN_SVR); |
213 | |
214 | vcpu->arch.cpu_type = KVM_CPU_E500MC; |
215 | |
216 | return 0; |
217 | } |
218 | |
/*
 * Fill @sregs with this vcpu's special registers for the
 * KVM_GET_SREGS ioctl: FSL implementation fields (SVR/HID0/MCAR),
 * the TLB configuration, and the high IVORs. Returns the result of
 * filling the common IVOR set.
 */
static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
			       KVM_SREGS_E_PC;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	kvmppc_get_sregs_e500_tlb(vcpu, sregs);

	/* ivor_high[3..5]: perfmon, doorbell, critical doorbell. */
	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];

	return kvmppc_get_sregs_ivor(vcpu, sregs);
}
242 | |
/*
 * Apply userspace-supplied special registers (KVM_SET_SREGS): FSL
 * implementation fields when impl_id matches, the TLB configuration,
 * and — only if the corresponding feature flags are set — the high
 * IVORs and common IVOR set. Returns 0 or a negative error.
 */
static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	/* IVOR updates are opt-in; without the flag we are done. */
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PC) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
			sregs->u.e.ivor_high[4];
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
			sregs->u.e.ivor_high[5];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}
276 | |
277 | static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, |
278 | union kvmppc_one_reg *val) |
279 | { |
280 | int r = 0; |
281 | |
282 | switch (id) { |
283 | case KVM_REG_PPC_SPRG9: |
284 | *val = get_reg_val(id, vcpu->arch.sprg9); |
285 | break; |
286 | default: |
287 | r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); |
288 | } |
289 | |
290 | return r; |
291 | } |
292 | |
293 | static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, |
294 | union kvmppc_one_reg *val) |
295 | { |
296 | int r = 0; |
297 | |
298 | switch (id) { |
299 | case KVM_REG_PPC_SPRG9: |
300 | vcpu->arch.sprg9 = set_reg_val(id, *val); |
301 | break; |
302 | default: |
303 | r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); |
304 | } |
305 | |
306 | return r; |
307 | } |
308 | |
309 | static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu) |
310 | { |
311 | struct kvmppc_vcpu_e500 *vcpu_e500; |
312 | int err; |
313 | |
314 | BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0); |
315 | vcpu_e500 = to_e500(vcpu); |
316 | |
317 | /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */ |
318 | vcpu->arch.oldpir = 0xffffffff; |
319 | |
320 | err = kvmppc_e500_tlb_init(vcpu_e500); |
321 | if (err) |
322 | return err; |
323 | |
324 | vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
325 | if (!vcpu->arch.shared) { |
326 | err = -ENOMEM; |
327 | goto uninit_tlb; |
328 | } |
329 | |
330 | return 0; |
331 | |
332 | uninit_tlb: |
333 | kvmppc_e500_tlb_uninit(vcpu_e500); |
334 | return err; |
335 | } |
336 | |
/* Release the resources allocated by vcpu_create: shared page + TLB state. */
static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
}
344 | |
345 | static int kvmppc_core_init_vm_e500mc(struct kvm *kvm) |
346 | { |
347 | int lpid; |
348 | |
349 | lpid = kvmppc_alloc_lpid(); |
350 | if (lpid < 0) |
351 | return lpid; |
352 | |
353 | /* |
354 | * Use two lpids per VM on cores with two threads like e6500. Use |
355 | * even numbers to speedup vcpu lpid computation with consecutive lpids |
356 | * per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on. |
357 | */ |
358 | if (threads_per_core == 2) |
359 | lpid <<= 1; |
360 | |
361 | kvm->arch.lpid = lpid; |
362 | return 0; |
363 | } |
364 | |
365 | static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm) |
366 | { |
367 | int lpid = kvm->arch.lpid; |
368 | |
369 | if (threads_per_core == 2) |
370 | lpid >>= 1; |
371 | |
372 | kvmppc_free_lpid(lpid); |
373 | } |
374 | |
/* e500mc backend ops table, registered with the KVM core at module init. */
static struct kvmppc_ops kvm_ops_e500mc = {
	.get_sregs = kvmppc_core_get_sregs_e500mc,
	.set_sregs = kvmppc_core_set_sregs_e500mc,
	.get_one_reg = kvmppc_get_one_reg_e500mc,
	.set_one_reg = kvmppc_set_one_reg_e500mc,
	.vcpu_load   = kvmppc_core_vcpu_load_e500mc,
	.vcpu_put    = kvmppc_core_vcpu_put_e500mc,
	.vcpu_create = kvmppc_core_vcpu_create_e500mc,
	.vcpu_free   = kvmppc_core_vcpu_free_e500mc,
	.init_vm = kvmppc_core_init_vm_e500mc,
	.destroy_vm = kvmppc_core_destroy_vm_e500mc,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
	.create_vcpu_debugfs = kvmppc_create_vcpu_debugfs_e500,
};
391 | |
392 | static int __init kvmppc_e500mc_init(void) |
393 | { |
394 | int r; |
395 | |
396 | r = kvmppc_e500mc_check_processor_compat(); |
397 | if (r) |
398 | goto err_out; |
399 | |
400 | r = kvmppc_booke_init(); |
401 | if (r) |
402 | goto err_out; |
403 | |
404 | /* |
405 | * Use two lpids per VM on dual threaded processors like e6500 |
406 | * to workarround the lack of tlb write conditional instruction. |
407 | * Expose half the number of available hardware lpids to the lpid |
408 | * allocator. |
409 | */ |
410 | kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core); |
411 | |
412 | r = kvm_init(vcpu_size: sizeof(struct kvmppc_vcpu_e500), vcpu_align: 0, THIS_MODULE); |
413 | if (r) |
414 | goto err_out; |
415 | kvm_ops_e500mc.owner = THIS_MODULE; |
416 | kvmppc_pr_ops = &kvm_ops_e500mc; |
417 | |
418 | err_out: |
419 | return r; |
420 | } |
421 | |
/* Module exit: unregister the ops table and tear down the booke layer. */
static void __exit kvmppc_e500mc_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}
427 | |
/* Module entry points and the /dev/kvm misc-device alias. */
module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm" );
432 | |