1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. |
5 | * |
6 | * KVM/MIPS: Instruction/Exception emulation |
7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ |
11 | |
12 | #include <linux/errno.h> |
13 | #include <linux/err.h> |
14 | #include <linux/ktime.h> |
15 | #include <linux/kvm_host.h> |
16 | #include <linux/vmalloc.h> |
17 | #include <linux/fs.h> |
18 | #include <linux/memblock.h> |
19 | #include <linux/random.h> |
20 | #include <asm/page.h> |
21 | #include <asm/cacheflush.h> |
22 | #include <asm/cacheops.h> |
23 | #include <asm/cpu-info.h> |
24 | #include <asm/mmu_context.h> |
25 | #include <asm/tlbflush.h> |
26 | #include <asm/inst.h> |
27 | |
28 | #undef CONFIG_MIPS_MT |
29 | #include <asm/r4kcache.h> |
30 | #define CONFIG_MIPS_MT |
31 | |
32 | #include "interrupt.h" |
33 | |
34 | #include "trace.h" |
35 | |
36 | /* |
 * Compute the return address and emulate the branch, if required.
 * This function should only be called when the faulting instruction is in a
 * branch delay slot, i.e. when CP0_Cause.BD is set.
39 | */ |
40 | static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc, |
41 | unsigned long *out) |
42 | { |
43 | unsigned int dspcontrol; |
44 | union mips_instruction insn; |
45 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
46 | long epc = instpc; |
47 | long nextpc; |
48 | int err; |
49 | |
50 | if (epc & 3) { |
51 | kvm_err("%s: unaligned epc\n" , __func__); |
52 | return -EINVAL; |
53 | } |
54 | |
55 | /* Read the instruction */ |
56 | err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word); |
57 | if (err) |
58 | return err; |
59 | |
60 | switch (insn.i_format.opcode) { |
	/* jr and jalr are in r_format. */
62 | case spec_op: |
63 | switch (insn.r_format.func) { |
64 | case jalr_op: |
65 | arch->gprs[insn.r_format.rd] = epc + 8; |
66 | fallthrough; |
67 | case jr_op: |
68 | nextpc = arch->gprs[insn.r_format.rs]; |
69 | break; |
70 | default: |
71 | return -EINVAL; |
72 | } |
73 | break; |
74 | |
75 | /* |
76 | * This group contains: |
77 | * bltz_op, bgez_op, bltzl_op, bgezl_op, |
78 | * bltzal_op, bgezal_op, bltzall_op, bgezall_op. |
79 | */ |
80 | case bcond_op: |
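		/*
		 * Taken branches target the address of the delay slot
		 * (epc + 4) plus the sign-extended 16-bit offset shifted left
		 * by 2; not-taken branches resume at epc + 8, just after the
		 * delay slot.
		 */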
81 | switch (insn.i_format.rt) { |
82 | case bltz_op: |
83 | case bltzl_op: |
84 | if ((long)arch->gprs[insn.i_format.rs] < 0) |
85 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
86 | else |
87 | epc += 8; |
88 | nextpc = epc; |
89 | break; |
90 | |
91 | case bgez_op: |
92 | case bgezl_op: |
93 | if ((long)arch->gprs[insn.i_format.rs] >= 0) |
94 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
95 | else |
96 | epc += 8; |
97 | nextpc = epc; |
98 | break; |
99 | |
100 | case bltzal_op: |
101 | case bltzall_op: |
102 | arch->gprs[31] = epc + 8; |
103 | if ((long)arch->gprs[insn.i_format.rs] < 0) |
104 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
105 | else |
106 | epc += 8; |
107 | nextpc = epc; |
108 | break; |
109 | |
110 | case bgezal_op: |
111 | case bgezall_op: |
112 | arch->gprs[31] = epc + 8; |
113 | if ((long)arch->gprs[insn.i_format.rs] >= 0) |
114 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
115 | else |
116 | epc += 8; |
117 | nextpc = epc; |
118 | break; |
119 | case bposge32_op: |
120 | if (!cpu_has_dsp) { |
121 | kvm_err("%s: DSP branch but not DSP ASE\n" , |
122 | __func__); |
123 | return -EINVAL; |
124 | } |
125 | |
126 | dspcontrol = rddsp(0x01); |
127 | |
128 | if (dspcontrol >= 32) |
129 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
130 | else |
131 | epc += 8; |
132 | nextpc = epc; |
133 | break; |
134 | default: |
135 | return -EINVAL; |
136 | } |
137 | break; |
138 | |
139 | /* These are unconditional and in j_format. */ |
140 | case jal_op: |
141 | arch->gprs[31] = instpc + 8; |
142 | fallthrough; |
143 | case j_op: |
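		/*
		 * PC-region jump: the low 28 bits of the delay slot address
		 * (epc + 4) are replaced by the 26-bit target shifted left by
		 * 2, while the top 4 bits are preserved.
		 */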
144 | epc += 4; |
145 | epc >>= 28; |
146 | epc <<= 28; |
147 | epc |= (insn.j_format.target << 2); |
148 | nextpc = epc; |
149 | break; |
150 | |
151 | /* These are conditional and in i_format. */ |
152 | case beq_op: |
153 | case beql_op: |
154 | if (arch->gprs[insn.i_format.rs] == |
155 | arch->gprs[insn.i_format.rt]) |
156 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
157 | else |
158 | epc += 8; |
159 | nextpc = epc; |
160 | break; |
161 | |
162 | case bne_op: |
163 | case bnel_op: |
164 | if (arch->gprs[insn.i_format.rs] != |
165 | arch->gprs[insn.i_format.rt]) |
166 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
167 | else |
168 | epc += 8; |
169 | nextpc = epc; |
170 | break; |
171 | |
172 | case blez_op: /* POP06 */ |
173 | #ifndef CONFIG_CPU_MIPSR6 |
174 | case blezl_op: /* removed in R6 */ |
175 | #endif |
176 | if (insn.i_format.rt != 0) |
177 | goto compact_branch; |
178 | if ((long)arch->gprs[insn.i_format.rs] <= 0) |
179 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
180 | else |
181 | epc += 8; |
182 | nextpc = epc; |
183 | break; |
184 | |
185 | case bgtz_op: /* POP07 */ |
186 | #ifndef CONFIG_CPU_MIPSR6 |
187 | case bgtzl_op: /* removed in R6 */ |
188 | #endif |
189 | if (insn.i_format.rt != 0) |
190 | goto compact_branch; |
191 | if ((long)arch->gprs[insn.i_format.rs] > 0) |
192 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
193 | else |
194 | epc += 8; |
195 | nextpc = epc; |
196 | break; |
197 | |
198 | /* And now the FPA/cp1 branch instructions. */ |
199 | case cop1_op: |
200 | kvm_err("%s: unsupported cop1_op\n" , __func__); |
201 | return -EINVAL; |
202 | |
203 | #ifdef CONFIG_CPU_MIPSR6 |
204 | /* R6 added the following compact branches with forbidden slots */ |
205 | case blezl_op: /* POP26 */ |
206 | case bgtzl_op: /* POP27 */ |
207 | /* only rt == 0 isn't compact branch */ |
208 | if (insn.i_format.rt != 0) |
209 | goto compact_branch; |
210 | return -EINVAL; |
211 | case pop10_op: |
212 | case pop30_op: |
213 | /* only rs == rt == 0 is reserved, rest are compact branches */ |
214 | if (insn.i_format.rs != 0 || insn.i_format.rt != 0) |
215 | goto compact_branch; |
216 | return -EINVAL; |
217 | case pop66_op: |
218 | case pop76_op: |
219 | /* only rs == 0 isn't compact branch */ |
220 | if (insn.i_format.rs != 0) |
221 | goto compact_branch; |
222 | return -EINVAL; |
223 | compact_branch: |
224 | /* |
225 | * If we've hit an exception on the forbidden slot, then |
226 | * the branch must not have been taken. |
227 | */ |
228 | epc += 8; |
229 | nextpc = epc; |
230 | break; |
231 | #else |
232 | compact_branch: |
233 | /* Fall through - Compact branches not supported before R6 */ |
234 | #endif |
235 | default: |
236 | return -EINVAL; |
237 | } |
238 | |
239 | *out = nextpc; |
240 | return 0; |
241 | } |
242 | |
243 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause) |
244 | { |
245 | int err; |
246 | |
247 | if (cause & CAUSEF_BD) { |
		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					     &vcpu->arch.pc);
250 | if (err) |
251 | return EMULATE_FAIL; |
252 | } else { |
253 | vcpu->arch.pc += 4; |
254 | } |
255 | |
256 | kvm_debug("update_pc(): New PC: %#lx\n" , vcpu->arch.pc); |
257 | |
258 | return EMULATE_DONE; |
259 | } |
260 | |
261 | /** |
262 | * kvm_get_badinstr() - Get bad instruction encoding. |
 * @opc: Guest pointer to faulting instruction.
 * @vcpu: KVM VCPU information.
 * @out: Output buffer for the instruction encoding.
 *
 * Gets the instruction encoding of the faulting instruction from the saved
 * BadInstr register value. There is no fallback to reading guest memory at
 * @opc; CPUs without the BadInstr register are not handled.
 *
 * Returns: 0 on success, with the encoding stored in *@out.
 *          -EINVAL if the CPU does not have a BadInstr register.
271 | */ |
272 | int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) |
273 | { |
274 | if (cpu_has_badinstr) { |
275 | *out = vcpu->arch.host_cp0_badinstr; |
276 | return 0; |
277 | } else { |
278 | WARN_ONCE(1, "CPU doesn't have BadInstr register\n" ); |
279 | return -EINVAL; |
280 | } |
281 | } |
282 | |
283 | /** |
284 | * kvm_get_badinstrp() - Get bad prior instruction encoding. |
 * @opc: Guest pointer to prior faulting instruction.
 * @vcpu: KVM VCPU information.
 * @out: Output buffer for the instruction encoding.
 *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * containing the delay slot which faulted) from the saved BadInstrP register
 * value. There is no fallback to reading guest memory at @opc; CPUs without
 * the BadInstrP register are not handled.
 *
 * Returns: 0 on success, with the encoding stored in *@out.
 *          -EINVAL if the CPU does not have a BadInstrP register.
293 | */ |
294 | int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) |
295 | { |
296 | if (cpu_has_badinstrp) { |
297 | *out = vcpu->arch.host_cp0_badinstrp; |
298 | return 0; |
299 | } else { |
300 | WARN_ONCE(1, "CPU doesn't have BadInstrp register\n" ); |
301 | return -EINVAL; |
302 | } |
303 | } |
304 | |
305 | /** |
306 | * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled. |
307 | * @vcpu: Virtual CPU. |
308 | * |
309 | * Returns: 1 if the CP0_Count timer is disabled by either the guest |
310 | * CP0_Cause.DC bit or the count_ctl.DC bit. |
311 | * 0 otherwise (in which case CP0_Count timer is running). |
312 | */ |
313 | int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) |
314 | { |
315 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
316 | |
317 | return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || |
318 | (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); |
319 | } |
320 | |
321 | /** |
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * @vcpu: Virtual CPU.
 * @now: Kernel time to scale.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns: The 32-bit count corresponding to @now.
327 | */ |
328 | static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now) |
329 | { |
330 | s64 now_ns, periods; |
331 | u64 delta; |
332 | |
	now_ns = ktime_to_ns(now);
334 | delta = now_ns + vcpu->arch.count_dyn_bias; |
335 | |
336 | if (delta >= vcpu->arch.count_period) { |
337 | /* If delta is out of safe range the bias needs adjusting */ |
		periods = div64_s64(now_ns, vcpu->arch.count_period);
339 | vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; |
340 | /* Recalculate delta with new bias */ |
341 | delta = now_ns + vcpu->arch.count_dyn_bias; |
342 | } |
343 | |
344 | /* |
345 | * We've ensured that: |
346 | * delta < count_period |
347 | * |
348 | * Therefore the intermediate delta*count_hz will never overflow since |
349 | * at the boundary condition: |
350 | * delta = count_period |
351 | * delta = NSEC_PER_SEC * 2^32 / count_hz |
352 | * delta * count_hz = NSEC_PER_SEC * 2^32 |
353 | */ |
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
355 | } |
356 | |
357 | /** |
358 | * kvm_mips_count_time() - Get effective current time. |
359 | * @vcpu: Virtual CPU. |
360 | * |
361 | * Get effective monotonic ktime. This is usually a straightforward ktime_get(), |
362 | * except when the master disable bit is set in count_ctl, in which case it is |
363 | * count_resume, i.e. the time that the count was disabled. |
364 | * |
365 | * Returns: Effective monotonic ktime for CP0_Count. |
366 | */ |
367 | static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) |
368 | { |
369 | if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) |
370 | return vcpu->arch.count_resume; |
371 | |
372 | return ktime_get(); |
373 | } |
374 | |
375 | /** |
376 | * kvm_mips_read_count_running() - Read the current count value as if running. |
377 | * @vcpu: Virtual CPU. |
378 | * @now: Kernel time to read CP0_Count at. |
379 | * |
 * Returns the current guest CP0_Count register at time @now, handling any
 * timer interrupt which is pending and hasn't been handled yet.
382 | * |
383 | * Returns: The current value of the guest CP0_Count register. |
384 | */ |
385 | static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) |
386 | { |
387 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
388 | ktime_t expires, threshold; |
389 | u32 count, compare; |
390 | int running; |
391 | |
392 | /* Calculate the biased and scaled guest CP0_Count */ |
393 | count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); |
394 | compare = kvm_read_c0_guest_compare(cop0); |
395 | |
396 | /* |
397 | * Find whether CP0_Count has reached the closest timer interrupt. If |
398 | * not, we shouldn't inject it. |
399 | */ |
400 | if ((s32)(count - compare) < 0) |
401 | return count; |
402 | |
403 | /* |
404 | * The CP0_Count we're going to return has already reached the closest |
405 | * timer interrupt. Quickly check if it really is a new interrupt by |
406 | * looking at whether the interval until the hrtimer expiry time is |
407 | * less than 1/4 of the timer period. |
408 | */ |
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
410 | threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); |
	if (ktime_before(expires, threshold)) {
412 | /* |
413 | * Cancel it while we handle it so there's no chance of |
414 | * interference with the timeout handler. |
415 | */ |
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
417 | |
418 | /* Nothing should be waiting on the timeout */ |
419 | kvm_mips_callbacks->queue_timer_int(vcpu); |
420 | |
421 | /* |
422 | * Restart the timer if it was running based on the expiry time |
423 | * we read, so that we don't push it back 2 periods. |
424 | */ |
425 | if (running) { |
426 | expires = ktime_add_ns(expires, |
427 | vcpu->arch.count_period); |
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
430 | } |
431 | } |
432 | |
433 | return count; |
434 | } |
435 | |
436 | /** |
437 | * kvm_mips_read_count() - Read the current count value. |
438 | * @vcpu: Virtual CPU. |
439 | * |
440 | * Read the current guest CP0_Count value, taking into account whether the timer |
441 | * is stopped. |
442 | * |
443 | * Returns: The current guest CP0_Count value. |
444 | */ |
445 | u32 kvm_mips_read_count(struct kvm_vcpu *vcpu) |
446 | { |
447 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
448 | |
449 | /* If count disabled just read static copy of count */ |
450 | if (kvm_mips_count_disabled(vcpu)) |
451 | return kvm_read_c0_guest_count(cop0); |
452 | |
	return kvm_mips_read_count_running(vcpu, ktime_get());
454 | } |
455 | |
456 | /** |
457 | * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer. |
458 | * @vcpu: Virtual CPU. |
459 | * @count: Output pointer for CP0_Count value at point of freeze. |
460 | * |
461 | * Freeze the hrtimer safely and return both the ktime and the CP0_Count value |
462 | * at the point it was frozen. It is guaranteed that any pending interrupts at |
463 | * the point it was frozen are handled, and none after that point. |
464 | * |
465 | * This is useful where the time/CP0_Count is needed in the calculation of the |
466 | * new parameters. |
467 | * |
468 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). |
469 | * |
470 | * Returns: The ktime at the point of freeze. |
471 | */ |
472 | ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count) |
473 | { |
474 | ktime_t now; |
475 | |
476 | /* stop hrtimer before finding time */ |
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
478 | now = ktime_get(); |
479 | |
480 | /* find count at this point and handle pending hrtimer */ |
481 | *count = kvm_mips_read_count_running(vcpu, now); |
482 | |
483 | return now; |
484 | } |
485 | |
486 | /** |
487 | * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. |
488 | * @vcpu: Virtual CPU. |
489 | * @now: ktime at point of resume. |
490 | * @count: CP0_Count at point of resume. |
491 | * |
492 | * Resumes the timer and updates the timer expiry based on @now and @count. |
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
494 | * parameters need to be changed. |
495 | * |
496 | * It is guaranteed that a timer interrupt immediately after resume will be |
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
499 | * |
500 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). |
501 | */ |
502 | static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, |
503 | ktime_t now, u32 count) |
504 | { |
505 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
506 | u32 compare; |
507 | u64 delta; |
508 | ktime_t expire; |
509 | |
510 | /* Calculate timeout (wrap 0 to 2^32) */ |
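	/*
	 * The "- 1 ... + 1" form maps compare == count to a full 2^32 count
	 * period rather than 0; an interrupt at exactly @count has already
	 * been handled by the caller (see kvm_mips_freeze_hrtimer()).
	 */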
511 | compare = kvm_read_c0_guest_compare(cop0); |
512 | delta = (u64)(u32)(compare - count - 1) + 1; |
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
514 | expire = ktime_add_ns(now, delta); |
515 | |
516 | /* Update hrtimer to use new timeout */ |
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
519 | } |
520 | |
521 | /** |
522 | * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry. |
523 | * @vcpu: Virtual CPU. |
524 | * @before: Time before Count was saved, lower bound of drift calculation. |
525 | * @count: CP0_Count at point of restore. |
526 | * @min_drift: Minimum amount of drift permitted before correction. |
527 | * Must be <= 0. |
528 | * |
529 | * Restores the timer from a particular @count, accounting for drift. This can |
 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer is
531 | * to be used for a period of time, but the exact ktime corresponding to the |
532 | * final Count that must be restored is not known. |
533 | * |
534 | * It is guaranteed that a timer interrupt immediately after restore will be |
535 | * handled, but not if CP0_Compare is exactly at @count. That case should |
536 | * already be handled when the hardware timer state is saved. |
537 | * |
538 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not |
539 | * stopped). |
540 | * |
541 | * Returns: Amount of correction to count_bias due to drift. |
542 | */ |
543 | int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before, |
544 | u32 count, int min_drift) |
545 | { |
546 | ktime_t now, count_time; |
547 | u32 now_count, before_count; |
548 | u64 delta; |
549 | int drift, ret = 0; |
550 | |
551 | /* Calculate expected count at before */ |
552 | before_count = vcpu->arch.count_bias + |
			kvm_mips_ktime_to_count(vcpu, before);
554 | |
555 | /* |
556 | * Detect significantly negative drift, where count is lower than |
557 | * expected. Some negative drift is expected when hardware counter is |
	 * set after kvm_mips_freeze_hrtimer(), and it is harmless to allow the
559 | * time to jump forwards a little, within reason. If the drift is too |
560 | * significant, adjust the bias to avoid a big Guest.CP0_Count jump. |
561 | */ |
562 | drift = count - before_count; |
563 | if (drift < min_drift) { |
564 | count_time = before; |
565 | vcpu->arch.count_bias += drift; |
566 | ret = drift; |
567 | goto resume; |
568 | } |
569 | |
570 | /* Calculate expected count right now */ |
571 | now = ktime_get(); |
572 | now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); |
573 | |
574 | /* |
575 | * Detect positive drift, where count is higher than expected, and |
576 | * adjust the bias to avoid guest time going backwards. |
577 | */ |
578 | drift = count - now_count; |
579 | if (drift > 0) { |
580 | count_time = now; |
581 | vcpu->arch.count_bias += drift; |
582 | ret = drift; |
583 | goto resume; |
584 | } |
585 | |
586 | /* Subtract nanosecond delta to find ktime when count was read */ |
587 | delta = (u64)(u32)(now_count - count); |
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
589 | count_time = ktime_sub_ns(now, delta); |
590 | |
591 | resume: |
592 | /* Resume using the calculated ktime */ |
	kvm_mips_resume_hrtimer(vcpu, count_time, count);
594 | return ret; |
595 | } |
596 | |
597 | /** |
598 | * kvm_mips_write_count() - Modify the count and update timer. |
599 | * @vcpu: Virtual CPU. |
600 | * @count: Guest CP0_Count value to set. |
601 | * |
602 | * Sets the CP0_Count value and updates the timer accordingly. |
603 | */ |
604 | void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count) |
605 | { |
606 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
607 | ktime_t now; |
608 | |
609 | /* Calculate bias */ |
610 | now = kvm_mips_count_time(vcpu); |
611 | vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); |
612 | |
613 | if (kvm_mips_count_disabled(vcpu)) |
614 | /* The timer's disabled, adjust the static count */ |
615 | kvm_write_c0_guest_count(cop0, count); |
616 | else |
617 | /* Update timeout */ |
618 | kvm_mips_resume_hrtimer(vcpu, now, count); |
619 | } |
620 | |
621 | /** |
622 | * kvm_mips_init_count() - Initialise timer. |
623 | * @vcpu: Virtual CPU. |
624 | * @count_hz: Frequency of timer. |
625 | * |
626 | * Initialise the timer to the specified frequency, zero it, and set it going if |
627 | * it's enabled. |
628 | */ |
629 | void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz) |
630 | { |
631 | vcpu->arch.count_hz = count_hz; |
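	/*
	 * count_period is the time for the 32-bit CP0_Count to wrap at
	 * count_hz: NSEC_PER_SEC * 2^32 / count_hz nanoseconds. For example,
	 * with a (hypothetical) 100 MHz count_hz this is roughly 42.95 s.
	 */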
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
633 | vcpu->arch.count_dyn_bias = 0; |
634 | |
635 | /* Starting at 0 */ |
	kvm_mips_write_count(vcpu, 0);
637 | } |
638 | |
639 | /** |
640 | * kvm_mips_set_count_hz() - Update the frequency of the timer. |
641 | * @vcpu: Virtual CPU. |
642 | * @count_hz: Frequency of CP0_Count timer in Hz. |
643 | * |
644 | * Change the frequency of the CP0_Count timer. This is done atomically so that |
645 | * CP0_Count is continuous and no timer interrupt is lost. |
646 | * |
647 | * Returns: -EINVAL if @count_hz is out of range. |
648 | * 0 on success. |
649 | */ |
650 | int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) |
651 | { |
652 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
653 | int dc; |
654 | ktime_t now; |
655 | u32 count; |
656 | |
657 | /* ensure the frequency is in a sensible range... */ |
658 | if (count_hz <= 0 || count_hz > NSEC_PER_SEC) |
659 | return -EINVAL; |
660 | /* ... and has actually changed */ |
661 | if (vcpu->arch.count_hz == count_hz) |
662 | return 0; |
663 | |
664 | /* Safely freeze timer so we can keep it continuous */ |
665 | dc = kvm_mips_count_disabled(vcpu); |
666 | if (dc) { |
667 | now = kvm_mips_count_time(vcpu); |
668 | count = kvm_read_c0_guest_count(cop0); |
669 | } else { |
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
671 | } |
672 | |
673 | /* Update the frequency */ |
674 | vcpu->arch.count_hz = count_hz; |
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
676 | vcpu->arch.count_dyn_bias = 0; |
677 | |
678 | /* Calculate adjusted bias so dynamic count is unchanged */ |
679 | vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); |
680 | |
681 | /* Update and resume hrtimer */ |
682 | if (!dc) |
683 | kvm_mips_resume_hrtimer(vcpu, now, count); |
684 | return 0; |
685 | } |
686 | |
687 | /** |
688 | * kvm_mips_write_compare() - Modify compare and update timer. |
689 | * @vcpu: Virtual CPU. |
690 | * @compare: New CP0_Compare value. |
691 | * @ack: Whether to acknowledge timer interrupt. |
692 | * |
693 | * Update CP0_Compare to a new value and update the timeout. |
694 | * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure |
695 | * any pending timer interrupt is preserved. |
696 | */ |
697 | void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) |
698 | { |
699 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
700 | int dc; |
701 | u32 old_compare = kvm_read_c0_guest_compare(cop0); |
702 | s32 delta = compare - old_compare; |
703 | u32 cause; |
	ktime_t now = ktime_set(0, 0);	/* silence bogus GCC warning */
705 | u32 count; |
706 | |
707 | /* if unchanged, must just be an ack */ |
708 | if (old_compare == compare) { |
709 | if (!ack) |
710 | return; |
711 | kvm_mips_callbacks->dequeue_timer_int(vcpu); |
712 | kvm_write_c0_guest_compare(cop0, compare); |
713 | return; |
714 | } |
715 | |
716 | /* |
717 | * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted |
718 | * too to prevent guest CP0_Count hitting guest CP0_Compare. |
719 | * |
720 | * The new GTOffset corresponds to the new value of CP0_Compare, and is |
721 | * set prior to it being written into the guest context. We disable |
722 | * preemption until the new value is written to prevent restore of a |
723 | * GTOffset corresponding to the old CP0_Compare value. |
724 | */ |
725 | if (delta > 0) { |
726 | preempt_disable(); |
727 | write_c0_gtoffset(compare - read_c0_count()); |
728 | back_to_back_c0_hazard(); |
729 | } |
730 | |
731 | /* freeze_hrtimer() takes care of timer interrupts <= count */ |
732 | dc = kvm_mips_count_disabled(vcpu); |
733 | if (!dc) |
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
735 | |
736 | if (ack) |
737 | kvm_mips_callbacks->dequeue_timer_int(vcpu); |
738 | else |
739 | /* |
740 | * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so |
741 | * preserve guest CP0_Cause.TI if we don't want to ack it. |
742 | */ |
743 | cause = kvm_read_c0_guest_cause(cop0); |
744 | |
745 | kvm_write_c0_guest_compare(cop0, compare); |
746 | |
747 | if (delta > 0) |
748 | preempt_enable(); |
749 | |
750 | back_to_back_c0_hazard(); |
751 | |
752 | if (!ack && cause & CAUSEF_TI) |
753 | kvm_write_c0_guest_cause(cop0, cause); |
754 | |
755 | /* resume_hrtimer() takes care of timer interrupts > count */ |
756 | if (!dc) |
757 | kvm_mips_resume_hrtimer(vcpu, now, count); |
758 | |
759 | /* |
760 | * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change |
761 | * until after the new CP0_Compare is written, otherwise new guest |
762 | * CP0_Count could hit new guest CP0_Compare. |
763 | */ |
764 | if (delta <= 0) |
765 | write_c0_gtoffset(compare - read_c0_count()); |
766 | } |
767 | |
768 | /** |
769 | * kvm_mips_count_disable() - Disable count. |
770 | * @vcpu: Virtual CPU. |
771 | * |
772 | * Disable the CP0_Count timer. A timer interrupt on or before the final stop |
773 | * time will be handled but not after. |
774 | * |
775 | * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or |
776 | * count_ctl.DC has been set (count disabled). |
777 | * |
778 | * Returns: The time that the timer was stopped. |
779 | */ |
780 | static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) |
781 | { |
782 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
783 | u32 count; |
784 | ktime_t now; |
785 | |
786 | /* Stop hrtimer */ |
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
788 | |
789 | /* Set the static count from the dynamic count, handling pending TI */ |
790 | now = ktime_get(); |
791 | count = kvm_mips_read_count_running(vcpu, now); |
792 | kvm_write_c0_guest_count(cop0, count); |
793 | |
794 | return now; |
795 | } |
796 | |
797 | /** |
798 | * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. |
799 | * @vcpu: Virtual CPU. |
800 | * |
801 | * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or |
802 | * before the final stop time will be handled if the timer isn't disabled by |
803 | * count_ctl.DC, but not after. |
804 | * |
805 | * Assumes CP0_Cause.DC is clear (count enabled). |
806 | */ |
807 | void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) |
808 | { |
809 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
810 | |
811 | kvm_set_c0_guest_cause(cop0, CAUSEF_DC); |
812 | if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) |
813 | kvm_mips_count_disable(vcpu); |
814 | } |
815 | |
816 | /** |
817 | * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. |
818 | * @vcpu: Virtual CPU. |
819 | * |
820 | * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after |
821 | * the start time will be handled if the timer isn't disabled by count_ctl.DC, |
822 | * potentially before even returning, so the caller should be careful with |
823 | * ordering of CP0_Cause modifications so as not to lose it. |
824 | * |
825 | * Assumes CP0_Cause.DC is set (count disabled). |
826 | */ |
827 | void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) |
828 | { |
829 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
830 | u32 count; |
831 | |
832 | kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); |
833 | |
834 | /* |
835 | * Set the dynamic count to match the static count. |
836 | * This starts the hrtimer if count_ctl.DC allows it. |
837 | * Otherwise it conveniently updates the biases. |
838 | */ |
839 | count = kvm_read_c0_guest_count(cop0); |
840 | kvm_mips_write_count(vcpu, count); |
841 | } |
842 | |
843 | /** |
844 | * kvm_mips_set_count_ctl() - Update the count control KVM register. |
845 | * @vcpu: Virtual CPU. |
846 | * @count_ctl: Count control register new value. |
847 | * |
848 | * Set the count control KVM register. The timer is updated accordingly. |
849 | * |
850 | * Returns: -EINVAL if reserved bits are set. |
851 | * 0 on success. |
852 | */ |
853 | int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) |
854 | { |
855 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
856 | s64 changed = count_ctl ^ vcpu->arch.count_ctl; |
857 | s64 delta; |
858 | ktime_t expire, now; |
859 | u32 count, compare; |
860 | |
861 | /* Only allow defined bits to be changed */ |
862 | if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC)) |
863 | return -EINVAL; |
864 | |
865 | /* Apply new value */ |
866 | vcpu->arch.count_ctl = count_ctl; |
867 | |
868 | /* Master CP0_Count disable */ |
869 | if (changed & KVM_REG_MIPS_COUNT_CTL_DC) { |
870 | /* Is CP0_Cause.DC already disabling CP0_Count? */ |
871 | if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) { |
872 | if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) |
873 | /* Just record the current time */ |
874 | vcpu->arch.count_resume = ktime_get(); |
875 | } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) { |
876 | /* disable timer and record current time */ |
877 | vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); |
878 | } else { |
879 | /* |
880 | * Calculate timeout relative to static count at resume |
881 | * time (wrap 0 to 2^32). |
882 | */ |
883 | count = kvm_read_c0_guest_count(cop0); |
884 | compare = kvm_read_c0_guest_compare(cop0); |
885 | delta = (u64)(u32)(compare - count - 1) + 1; |
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
888 | expire = ktime_add_ns(vcpu->arch.count_resume, delta); |
889 | |
890 | /* Handle pending interrupt */ |
891 | now = ktime_get(); |
892 | if (ktime_compare(now, expire) >= 0) |
893 | /* Nothing should be waiting on the timeout */ |
894 | kvm_mips_callbacks->queue_timer_int(vcpu); |
895 | |
896 | /* Resume hrtimer without changing bias */ |
897 | count = kvm_mips_read_count_running(vcpu, now); |
898 | kvm_mips_resume_hrtimer(vcpu, now, count); |
899 | } |
900 | } |
901 | |
902 | return 0; |
903 | } |
904 | |
905 | /** |
906 | * kvm_mips_set_count_resume() - Update the count resume KVM register. |
907 | * @vcpu: Virtual CPU. |
908 | * @count_resume: Count resume register new value. |
909 | * |
910 | * Set the count resume KVM register. |
911 | * |
912 | * Returns: -EINVAL if out of valid range (0..now). |
913 | * 0 on success. |
914 | */ |
915 | int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume) |
916 | { |
917 | /* |
918 | * It doesn't make sense for the resume time to be in the future, as it |
919 | * would be possible for the next interrupt to be more than a full |
920 | * period in the future. |
921 | */ |
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
923 | return -EINVAL; |
924 | |
	vcpu->arch.count_resume = ns_to_ktime(count_resume);
926 | return 0; |
927 | } |
928 | |
929 | /** |
930 | * kvm_mips_count_timeout() - Push timer forward on timeout. |
931 | * @vcpu: Virtual CPU. |
932 | * |
 * Handle an hrtimer event by pushing the hrtimer forward a period.
934 | * |
935 | * Returns: The hrtimer_restart value to return to the hrtimer subsystem. |
936 | */ |
937 | enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) |
938 | { |
939 | /* Add the Count period to the current expiry time */ |
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
942 | return HRTIMER_RESTART; |
943 | } |
944 | |
945 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) |
946 | { |
947 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n" , vcpu->arch.pc, |
948 | vcpu->arch.pending_exceptions); |
949 | |
950 | ++vcpu->stat.wait_exits; |
951 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT); |
952 | if (!vcpu->arch.pending_exceptions) { |
953 | kvm_vz_lose_htimer(vcpu); |
954 | vcpu->arch.wait = 1; |
955 | kvm_vcpu_halt(vcpu); |
956 | |
957 | /* |
		 * If we are runnable after the halt, go off to user space to
		 * check whether any I/O interrupts are pending.
960 | */ |
961 | if (kvm_arch_vcpu_runnable(vcpu)) |
962 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; |
963 | } |
964 | |
965 | return EMULATE_DONE; |
966 | } |
967 | |
968 | enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, |
969 | u32 cause, |
970 | struct kvm_vcpu *vcpu) |
971 | { |
972 | int r; |
973 | enum emulation_result er; |
974 | u32 rt; |
975 | struct kvm_run *run = vcpu->run; |
976 | void *data = run->mmio.data; |
977 | unsigned int imme; |
978 | unsigned long curr_pc; |
979 | |
980 | /* |
981 | * Update PC and hold onto current PC in case there is |
982 | * an error and we want to rollback the PC |
983 | */ |
984 | curr_pc = vcpu->arch.pc; |
985 | er = update_pc(vcpu, cause); |
986 | if (er == EMULATE_FAIL) |
987 | return er; |
988 | |
989 | rt = inst.i_format.rt; |
990 | |
991 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
992 | vcpu->arch.host_cp0_badvaddr); |
993 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) |
994 | goto out_fail; |
995 | |
996 | switch (inst.i_format.opcode) { |
997 | #if defined(CONFIG_64BIT) |
998 | case sd_op: |
999 | run->mmio.len = 8; |
1000 | *(u64 *)data = vcpu->arch.gprs[rt]; |
1001 | |
1002 | kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n" , |
1003 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1004 | vcpu->arch.gprs[rt], *(u64 *)data); |
1005 | break; |
1006 | #endif |
1007 | |
1008 | case sw_op: |
1009 | run->mmio.len = 4; |
1010 | *(u32 *)data = vcpu->arch.gprs[rt]; |
1011 | |
1012 | kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n" , |
1013 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1014 | vcpu->arch.gprs[rt], *(u32 *)data); |
1015 | break; |
1016 | |
1017 | case sh_op: |
1018 | run->mmio.len = 2; |
1019 | *(u16 *)data = vcpu->arch.gprs[rt]; |
1020 | |
1021 | kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n" , |
1022 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1023 | vcpu->arch.gprs[rt], *(u16 *)data); |
1024 | break; |
1025 | |
1026 | case sb_op: |
1027 | run->mmio.len = 1; |
1028 | *(u8 *)data = vcpu->arch.gprs[rt]; |
1029 | |
1030 | kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n" , |
1031 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1032 | vcpu->arch.gprs[rt], *(u8 *)data); |
1033 | break; |
1034 | |
1035 | case swl_op: |
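		/*
		 * swl/swr store only part of the register: the MMIO address
		 * is aligned down to the containing word, and the low address
		 * bits select which byte lanes of the register are merged
		 * into the word that is written out.
		 */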
1036 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1037 | vcpu->arch.host_cp0_badvaddr) & (~0x3); |
1038 | run->mmio.len = 4; |
1039 | imme = vcpu->arch.host_cp0_badvaddr & 0x3; |
1040 | switch (imme) { |
1041 | case 0: |
1042 | *(u32 *)data = ((*(u32 *)data) & 0xffffff00) | |
1043 | (vcpu->arch.gprs[rt] >> 24); |
1044 | break; |
1045 | case 1: |
1046 | *(u32 *)data = ((*(u32 *)data) & 0xffff0000) | |
1047 | (vcpu->arch.gprs[rt] >> 16); |
1048 | break; |
1049 | case 2: |
1050 | *(u32 *)data = ((*(u32 *)data) & 0xff000000) | |
1051 | (vcpu->arch.gprs[rt] >> 8); |
1052 | break; |
1053 | case 3: |
1054 | *(u32 *)data = vcpu->arch.gprs[rt]; |
1055 | break; |
1056 | default: |
1057 | break; |
1058 | } |
1059 | |
1060 | kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n" , |
1061 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1062 | vcpu->arch.gprs[rt], *(u32 *)data); |
1063 | break; |
1064 | |
1065 | case swr_op: |
1066 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1067 | vcpu->arch.host_cp0_badvaddr) & (~0x3); |
1068 | run->mmio.len = 4; |
1069 | imme = vcpu->arch.host_cp0_badvaddr & 0x3; |
1070 | switch (imme) { |
1071 | case 0: |
1072 | *(u32 *)data = vcpu->arch.gprs[rt]; |
1073 | break; |
1074 | case 1: |
1075 | *(u32 *)data = ((*(u32 *)data) & 0xff) | |
1076 | (vcpu->arch.gprs[rt] << 8); |
1077 | break; |
1078 | case 2: |
1079 | *(u32 *)data = ((*(u32 *)data) & 0xffff) | |
1080 | (vcpu->arch.gprs[rt] << 16); |
1081 | break; |
1082 | case 3: |
1083 | *(u32 *)data = ((*(u32 *)data) & 0xffffff) | |
1084 | (vcpu->arch.gprs[rt] << 24); |
1085 | break; |
1086 | default: |
1087 | break; |
1088 | } |
1089 | |
1090 | kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n" , |
1091 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1092 | vcpu->arch.gprs[rt], *(u32 *)data); |
1093 | break; |
1094 | |
1095 | #if defined(CONFIG_64BIT) |
1096 | case sdl_op: |
1097 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1098 | vcpu->arch.host_cp0_badvaddr) & (~0x7); |
1099 | |
1100 | run->mmio.len = 8; |
1101 | imme = vcpu->arch.host_cp0_badvaddr & 0x7; |
1102 | switch (imme) { |
1103 | case 0: |
1104 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) | |
1105 | ((vcpu->arch.gprs[rt] >> 56) & 0xff); |
1106 | break; |
1107 | case 1: |
1108 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) | |
1109 | ((vcpu->arch.gprs[rt] >> 48) & 0xffff); |
1110 | break; |
1111 | case 2: |
1112 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) | |
1113 | ((vcpu->arch.gprs[rt] >> 40) & 0xffffff); |
1114 | break; |
1115 | case 3: |
1116 | *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) | |
1117 | ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff); |
1118 | break; |
1119 | case 4: |
1120 | *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) | |
1121 | ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff); |
1122 | break; |
1123 | case 5: |
1124 | *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) | |
1125 | ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff); |
1126 | break; |
1127 | case 6: |
1128 | *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) | |
1129 | ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff); |
1130 | break; |
1131 | case 7: |
1132 | *(u64 *)data = vcpu->arch.gprs[rt]; |
1133 | break; |
1134 | default: |
1135 | break; |
1136 | } |
1137 | |
1138 | kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n" , |
1139 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1140 | vcpu->arch.gprs[rt], *(u64 *)data); |
1141 | break; |
1142 | |
1143 | case sdr_op: |
1144 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1145 | vcpu->arch.host_cp0_badvaddr) & (~0x7); |
1146 | |
1147 | run->mmio.len = 8; |
1148 | imme = vcpu->arch.host_cp0_badvaddr & 0x7; |
1149 | switch (imme) { |
1150 | case 0: |
1151 | *(u64 *)data = vcpu->arch.gprs[rt]; |
1152 | break; |
1153 | case 1: |
1154 | *(u64 *)data = ((*(u64 *)data) & 0xff) | |
1155 | (vcpu->arch.gprs[rt] << 8); |
1156 | break; |
1157 | case 2: |
1158 | *(u64 *)data = ((*(u64 *)data) & 0xffff) | |
1159 | (vcpu->arch.gprs[rt] << 16); |
1160 | break; |
1161 | case 3: |
1162 | *(u64 *)data = ((*(u64 *)data) & 0xffffff) | |
1163 | (vcpu->arch.gprs[rt] << 24); |
1164 | break; |
1165 | case 4: |
1166 | *(u64 *)data = ((*(u64 *)data) & 0xffffffff) | |
1167 | (vcpu->arch.gprs[rt] << 32); |
1168 | break; |
1169 | case 5: |
1170 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) | |
1171 | (vcpu->arch.gprs[rt] << 40); |
1172 | break; |
1173 | case 6: |
1174 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) | |
1175 | (vcpu->arch.gprs[rt] << 48); |
1176 | break; |
1177 | case 7: |
1178 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) | |
1179 | (vcpu->arch.gprs[rt] << 56); |
1180 | break; |
1181 | default: |
1182 | break; |
1183 | } |
1184 | |
1185 | kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n" , |
1186 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1187 | vcpu->arch.gprs[rt], *(u64 *)data); |
1188 | break; |
1189 | #endif |
1190 | |
1191 | #ifdef CONFIG_CPU_LOONGSON64 |
1192 | case sdc2_op: |
1193 | rt = inst.loongson3_lsdc2_format.rt; |
1194 | switch (inst.loongson3_lsdc2_format.opcode1) { |
1195 | /* |
1196 | * Loongson-3 overridden sdc2 instructions. |
1197 | * opcode1 instruction |
		 * 0x0 gssbx: store 1 byte from GPR
1199 | * 0x1 gsshx: store 2 bytes from GPR |
1200 | * 0x2 gsswx: store 4 bytes from GPR |
1201 | * 0x3 gssdx: store 8 bytes from GPR |
1202 | */ |
1203 | case 0x0: |
1204 | run->mmio.len = 1; |
1205 | *(u8 *)data = vcpu->arch.gprs[rt]; |
1206 | |
1207 | kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n" , |
1208 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1209 | vcpu->arch.gprs[rt], *(u8 *)data); |
1210 | break; |
1211 | case 0x1: |
1212 | run->mmio.len = 2; |
1213 | *(u16 *)data = vcpu->arch.gprs[rt]; |
1214 | |
1215 | kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n" , |
1216 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1217 | vcpu->arch.gprs[rt], *(u16 *)data); |
1218 | break; |
1219 | case 0x2: |
1220 | run->mmio.len = 4; |
1221 | *(u32 *)data = vcpu->arch.gprs[rt]; |
1222 | |
1223 | kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n" , |
1224 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1225 | vcpu->arch.gprs[rt], *(u32 *)data); |
1226 | break; |
1227 | case 0x3: |
1228 | run->mmio.len = 8; |
1229 | *(u64 *)data = vcpu->arch.gprs[rt]; |
1230 | |
1231 | kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n" , |
1232 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1233 | vcpu->arch.gprs[rt], *(u64 *)data); |
1234 | break; |
1235 | default: |
1236 | kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n" , |
1237 | inst.word); |
1238 | break; |
1239 | } |
1240 | break; |
1241 | #endif |
1242 | default: |
1243 | kvm_err("Store not yet supported (inst=0x%08x)\n" , |
1244 | inst.word); |
1245 | goto out_fail; |
1246 | } |
1247 | |
1248 | vcpu->mmio_needed = 1; |
1249 | run->mmio.is_write = 1; |
1250 | vcpu->mmio_is_write = 1; |
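	/*
	 * Try the in-kernel MMIO bus first; if no device claims the access,
	 * return EMULATE_DO_MMIO so it is completed in user space.
	 */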
1251 | |
	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			run->mmio.phys_addr, run->mmio.len, data);
1254 | |
1255 | if (!r) { |
1256 | vcpu->mmio_needed = 0; |
1257 | return EMULATE_DONE; |
1258 | } |
1259 | |
1260 | return EMULATE_DO_MMIO; |
1261 | |
1262 | out_fail: |
1263 | /* Rollback PC if emulation was unsuccessful */ |
1264 | vcpu->arch.pc = curr_pc; |
1265 | return EMULATE_FAIL; |
1266 | } |
1267 | |
1268 | enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, |
1269 | u32 cause, struct kvm_vcpu *vcpu) |
1270 | { |
1271 | struct kvm_run *run = vcpu->run; |
1272 | int r; |
1273 | enum emulation_result er; |
1274 | unsigned long curr_pc; |
1275 | u32 op, rt; |
1276 | unsigned int imme; |
1277 | |
1278 | rt = inst.i_format.rt; |
1279 | op = inst.i_format.opcode; |
1280 | |
1281 | /* |
1282 | * Find the resume PC now while we have safe and easy access to the |
1283 | * prior branch instruction, and save it for |
1284 | * kvm_mips_complete_mmio_load() to restore later. |
1285 | */ |
1286 | curr_pc = vcpu->arch.pc; |
1287 | er = update_pc(vcpu, cause); |
1288 | if (er == EMULATE_FAIL) |
1289 | return er; |
1290 | vcpu->arch.io_pc = vcpu->arch.pc; |
1291 | vcpu->arch.pc = curr_pc; |
1292 | |
1293 | vcpu->arch.io_gpr = rt; |
1294 | |
1295 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1296 | vcpu->arch.host_cp0_badvaddr); |
1297 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) |
1298 | return EMULATE_FAIL; |
1299 | |
1300 | vcpu->mmio_needed = 2; /* signed */ |
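	/*
	 * vcpu->mmio_needed tells kvm_mips_complete_mmio_load() how to merge
	 * the MMIO data into the destination GPR: 1 = zero-extend,
	 * 2 = sign-extend, 3..10 = lwl/lwr partial-word merges,
	 * 11..26 = ldl/ldr partial-doubleword merges, 27..30 = Loongson
	 * gsl*x loads.
	 */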
1301 | switch (op) { |
1302 | #if defined(CONFIG_64BIT) |
1303 | case ld_op: |
1304 | run->mmio.len = 8; |
1305 | break; |
1306 | |
1307 | case lwu_op: |
1308 | vcpu->mmio_needed = 1; /* unsigned */ |
1309 | fallthrough; |
1310 | #endif |
1311 | case lw_op: |
1312 | run->mmio.len = 4; |
1313 | break; |
1314 | |
1315 | case lhu_op: |
1316 | vcpu->mmio_needed = 1; /* unsigned */ |
1317 | fallthrough; |
1318 | case lh_op: |
1319 | run->mmio.len = 2; |
1320 | break; |
1321 | |
1322 | case lbu_op: |
1323 | vcpu->mmio_needed = 1; /* unsigned */ |
1324 | fallthrough; |
1325 | case lb_op: |
1326 | run->mmio.len = 1; |
1327 | break; |
1328 | |
1329 | case lwl_op: |
1330 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1331 | vcpu->arch.host_cp0_badvaddr) & (~0x3); |
1332 | |
1333 | run->mmio.len = 4; |
1334 | imme = vcpu->arch.host_cp0_badvaddr & 0x3; |
1335 | switch (imme) { |
1336 | case 0: |
1337 | vcpu->mmio_needed = 3; /* 1 byte */ |
1338 | break; |
1339 | case 1: |
1340 | vcpu->mmio_needed = 4; /* 2 bytes */ |
1341 | break; |
1342 | case 2: |
1343 | vcpu->mmio_needed = 5; /* 3 bytes */ |
1344 | break; |
1345 | case 3: |
1346 | vcpu->mmio_needed = 6; /* 4 bytes */ |
1347 | break; |
1348 | default: |
1349 | break; |
1350 | } |
1351 | break; |
1352 | |
1353 | case lwr_op: |
1354 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1355 | vcpu->arch.host_cp0_badvaddr) & (~0x3); |
1356 | |
1357 | run->mmio.len = 4; |
1358 | imme = vcpu->arch.host_cp0_badvaddr & 0x3; |
1359 | switch (imme) { |
1360 | case 0: |
1361 | vcpu->mmio_needed = 7; /* 4 bytes */ |
1362 | break; |
1363 | case 1: |
1364 | vcpu->mmio_needed = 8; /* 3 bytes */ |
1365 | break; |
1366 | case 2: |
1367 | vcpu->mmio_needed = 9; /* 2 bytes */ |
1368 | break; |
1369 | case 3: |
1370 | vcpu->mmio_needed = 10; /* 1 byte */ |
1371 | break; |
1372 | default: |
1373 | break; |
1374 | } |
1375 | break; |
1376 | |
1377 | #if defined(CONFIG_64BIT) |
1378 | case ldl_op: |
1379 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1380 | vcpu->arch.host_cp0_badvaddr) & (~0x7); |
1381 | |
1382 | run->mmio.len = 8; |
1383 | imme = vcpu->arch.host_cp0_badvaddr & 0x7; |
1384 | switch (imme) { |
1385 | case 0: |
1386 | vcpu->mmio_needed = 11; /* 1 byte */ |
1387 | break; |
1388 | case 1: |
1389 | vcpu->mmio_needed = 12; /* 2 bytes */ |
1390 | break; |
1391 | case 2: |
1392 | vcpu->mmio_needed = 13; /* 3 bytes */ |
1393 | break; |
1394 | case 3: |
1395 | vcpu->mmio_needed = 14; /* 4 bytes */ |
1396 | break; |
1397 | case 4: |
1398 | vcpu->mmio_needed = 15; /* 5 bytes */ |
1399 | break; |
1400 | case 5: |
1401 | vcpu->mmio_needed = 16; /* 6 bytes */ |
1402 | break; |
1403 | case 6: |
1404 | vcpu->mmio_needed = 17; /* 7 bytes */ |
1405 | break; |
1406 | case 7: |
1407 | vcpu->mmio_needed = 18; /* 8 bytes */ |
1408 | break; |
1409 | default: |
1410 | break; |
1411 | } |
1412 | break; |
1413 | |
1414 | case ldr_op: |
1415 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1416 | vcpu->arch.host_cp0_badvaddr) & (~0x7); |
1417 | |
1418 | run->mmio.len = 8; |
1419 | imme = vcpu->arch.host_cp0_badvaddr & 0x7; |
1420 | switch (imme) { |
1421 | case 0: |
1422 | vcpu->mmio_needed = 19; /* 8 bytes */ |
1423 | break; |
1424 | case 1: |
1425 | vcpu->mmio_needed = 20; /* 7 bytes */ |
1426 | break; |
1427 | case 2: |
1428 | vcpu->mmio_needed = 21; /* 6 bytes */ |
1429 | break; |
1430 | case 3: |
1431 | vcpu->mmio_needed = 22; /* 5 bytes */ |
1432 | break; |
1433 | case 4: |
1434 | vcpu->mmio_needed = 23; /* 4 bytes */ |
1435 | break; |
1436 | case 5: |
1437 | vcpu->mmio_needed = 24; /* 3 bytes */ |
1438 | break; |
1439 | case 6: |
1440 | vcpu->mmio_needed = 25; /* 2 bytes */ |
1441 | break; |
1442 | case 7: |
1443 | vcpu->mmio_needed = 26; /* 1 byte */ |
1444 | break; |
1445 | default: |
1446 | break; |
1447 | } |
1448 | break; |
1449 | #endif |
1450 | |
1451 | #ifdef CONFIG_CPU_LOONGSON64 |
1452 | case ldc2_op: |
1453 | rt = inst.loongson3_lsdc2_format.rt; |
1454 | switch (inst.loongson3_lsdc2_format.opcode1) { |
1455 | /* |
1456 | * Loongson-3 overridden ldc2 instructions. |
1457 | * opcode1 instruction |
		 * 0x0 gslbx: load 1 byte into GPR
		 * 0x1 gslhx: load 2 bytes into GPR
		 * 0x2 gslwx: load 4 bytes into GPR
		 * 0x3 gsldx: load 8 bytes into GPR
1462 | */ |
1463 | case 0x0: |
1464 | run->mmio.len = 1; |
1465 | vcpu->mmio_needed = 27; /* signed */ |
1466 | break; |
1467 | case 0x1: |
1468 | run->mmio.len = 2; |
1469 | vcpu->mmio_needed = 28; /* signed */ |
1470 | break; |
1471 | case 0x2: |
1472 | run->mmio.len = 4; |
1473 | vcpu->mmio_needed = 29; /* signed */ |
1474 | break; |
1475 | case 0x3: |
1476 | run->mmio.len = 8; |
1477 | vcpu->mmio_needed = 30; /* signed */ |
1478 | break; |
1479 | default: |
1480 | kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n" , |
1481 | inst.word); |
1482 | break; |
1483 | } |
1484 | break; |
1485 | #endif |
1486 | |
1487 | default: |
1488 | kvm_err("Load not yet supported (inst=0x%08x)\n" , |
1489 | inst.word); |
1490 | vcpu->mmio_needed = 0; |
1491 | return EMULATE_FAIL; |
1492 | } |
1493 | |
1494 | run->mmio.is_write = 0; |
1495 | vcpu->mmio_is_write = 0; |
1496 | |
	r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
		      run->mmio.phys_addr, run->mmio.len, run->mmio.data);
1499 | |
1500 | if (!r) { |
1501 | kvm_mips_complete_mmio_load(vcpu); |
1502 | vcpu->mmio_needed = 0; |
1503 | return EMULATE_DONE; |
1504 | } |
1505 | |
1506 | return EMULATE_DO_MMIO; |
1507 | } |
1508 | |
1509 | enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu) |
1510 | { |
1511 | struct kvm_run *run = vcpu->run; |
1512 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; |
1513 | enum emulation_result er = EMULATE_DONE; |
1514 | |
1515 | if (run->mmio.len > sizeof(*gpr)) { |
1516 | kvm_err("Bad MMIO length: %d" , run->mmio.len); |
1517 | er = EMULATE_FAIL; |
1518 | goto done; |
1519 | } |
1520 | |
1521 | /* Restore saved resume PC */ |
1522 | vcpu->arch.pc = vcpu->arch.io_pc; |
1523 | |
1524 | switch (run->mmio.len) { |
1525 | case 8: |
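		/*
		 * Cases 11..26 below match the ldl/ldr codes assigned in
		 * kvm_mips_emulate_load(); each merges the loaded bytes into
		 * the preserved portion of the destination GPR.
		 */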
1526 | switch (vcpu->mmio_needed) { |
1527 | case 11: |
1528 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) | |
1529 | (((*(s64 *)run->mmio.data) & 0xff) << 56); |
1530 | break; |
1531 | case 12: |
1532 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) | |
1533 | (((*(s64 *)run->mmio.data) & 0xffff) << 48); |
1534 | break; |
1535 | case 13: |
1536 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) | |
1537 | (((*(s64 *)run->mmio.data) & 0xffffff) << 40); |
1538 | break; |
1539 | case 14: |
1540 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) | |
1541 | (((*(s64 *)run->mmio.data) & 0xffffffff) << 32); |
1542 | break; |
1543 | case 15: |
1544 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | |
1545 | (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24); |
1546 | break; |
1547 | case 16: |
1548 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | |
1549 | (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16); |
1550 | break; |
1551 | case 17: |
1552 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | |
1553 | (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8); |
1554 | break; |
1555 | case 18: |
1556 | case 19: |
1557 | *gpr = *(s64 *)run->mmio.data; |
1558 | break; |
1559 | case 20: |
1560 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) | |
1561 | ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff); |
1562 | break; |
1563 | case 21: |
1564 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) | |
1565 | ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff); |
1566 | break; |
1567 | case 22: |
1568 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) | |
1569 | ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff); |
1570 | break; |
1571 | case 23: |
1572 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) | |
1573 | ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff); |
1574 | break; |
1575 | case 24: |
1576 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) | |
1577 | ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff); |
1578 | break; |
1579 | case 25: |
1580 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) | |
1581 | ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff); |
1582 | break; |
1583 | case 26: |
1584 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) | |
1585 | ((((*(s64 *)run->mmio.data)) >> 56) & 0xff); |
1586 | break; |
1587 | default: |
1588 | *gpr = *(s64 *)run->mmio.data; |
1589 | } |
1590 | break; |
1591 | |
1592 | case 4: |
1593 | switch (vcpu->mmio_needed) { |
1594 | case 1: |
1595 | *gpr = *(u32 *)run->mmio.data; |
1596 | break; |
1597 | case 2: |
1598 | *gpr = *(s32 *)run->mmio.data; |
1599 | break; |
1600 | case 3: |
1601 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | |
1602 | (((*(s32 *)run->mmio.data) & 0xff) << 24); |
1603 | break; |
1604 | case 4: |
1605 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | |
1606 | (((*(s32 *)run->mmio.data) & 0xffff) << 16); |
1607 | break; |
1608 | case 5: |
1609 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | |
1610 | (((*(s32 *)run->mmio.data) & 0xffffff) << 8); |
1611 | break; |
1612 | case 6: |
1613 | case 7: |
1614 | *gpr = *(s32 *)run->mmio.data; |
1615 | break; |
1616 | case 8: |
1617 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) | |
1618 | ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff); |
1619 | break; |
1620 | case 9: |
1621 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) | |
1622 | ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff); |
1623 | break; |
1624 | case 10: |
1625 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) | |
1626 | ((((*(s32 *)run->mmio.data)) >> 24) & 0xff); |
1627 | break; |
1628 | default: |
1629 | *gpr = *(s32 *)run->mmio.data; |
1630 | } |
1631 | break; |
1632 | |
1633 | case 2: |
1634 | if (vcpu->mmio_needed == 1) |
1635 | *gpr = *(u16 *)run->mmio.data; |
1636 | else |
1637 | *gpr = *(s16 *)run->mmio.data; |
1638 | |
1639 | break; |
1640 | case 1: |
1641 | if (vcpu->mmio_needed == 1) |
1642 | *gpr = *(u8 *)run->mmio.data; |
1643 | else |
1644 | *gpr = *(s8 *)run->mmio.data; |
1645 | break; |
1646 | } |
1647 | |
1648 | done: |
1649 | return er; |
1650 | } |
1651 | |