// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

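/*
 * Find the valid guest SLB entry that maps eaddr, comparing the
 * 256MB or 1TB ESID depending on each entry's segment size bit.
 * Returns NULL if no entry matches.
 */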
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk("  %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}

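/* Segment shift for an SLB entry: 40 bits for a 1T segment, 28 for 256M. */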
static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

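/*
 * Compute the virtual page number for eaddr: the in-segment offset
 * above VPN_SHIFT, with the VSID placed above it.
 */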
static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
	eaddr &= kvmppc_slb_offset_mask(slb);

	return (eaddr >> VPN_SHIFT) |
	       ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	return kvmppc_slb_calc_vpn(slb, eaddr);
}

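/* Return log2 of the page size for an MMU page size encoding. */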
static int mmu_pagesize(int mmu_pg)
{
	switch (mmu_pg) {
	case MMU_PAGE_64K:
		return 16;
	case MMU_PAGE_16M:
		return 24;
	}
	return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

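/*
 * Return the host virtual address of the primary or secondary PTEG
 * for eaddr: hash the VPN, mask with the HTAB size and add the HTAB
 * origin, both taken from the guest's SDR1. For PAPR guests SDR1
 * already holds an HVA; otherwise the resulting GPA is converted
 * with gfn_to_hva().
 */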
static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u64 hash, pteg, htabsize;
	u32 ssize;
	hva_t r;
	u64 vpn;

	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

140 | dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n" , |
141 | page, vcpu_book3s->sdr1, pteg, slbe->vsid); |
142 | |
143 | /* When running a PAPR guest, SDR1 contains a HVA address instead |
144 | of a GPA */ |
	if (vcpu->arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}

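/*
 * Build the abbreviated virtual page number (AVPN) for eaddr, in the
 * format used for matching the first doubleword of a HPTE.
 */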
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}

/*
 * Return page size encoded in the second word of a HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry. This doesn't handle mixed pagesize segments yet.
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
	switch (slbe->base_page_size) {
	case MMU_PAGE_64K:
		if ((r & 0xf000) == 0x1000)
			return MMU_PAGE_64K;
		break;
	case MMU_PAGE_16M:
		if ((r & 0xff000) == 0)
			return MMU_PAGE_16M;
		break;
	}
	return -1;
}

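/*
 * Translate an effective address via the guest hashed page table:
 * look up the SLB entry, search the primary and then the secondary
 * PTEG for a matching HPTE, derive the permissions from the PP and
 * key bits, and update the HPTE's R and C bits as hardware would.
 */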
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data,
				      bool iswrite)
{
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	u64 r;
	u64 v_val, v_mask;
	u64 eaddr_mask;
	int i;
	u8 pp, key = 0;
	bool found = false;
	bool second = false;
	int pgsize;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;
		gpte->page_size = MMU_PAGE_4K;
		gpte->wimg = HPTE_R_M;

		return 0;
	}

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
	v_val = avpn & HPTE_V_AVPN;

	if (slbe->tb)
		v_val |= SLB_VSID_B_1T;
	if (slbe->large)
		v_val |= HPTE_V_LARGE;
	v_val |= HPTE_V_VALID;

	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
		HPTE_V_SECONDARY;

	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk_ratelimited(KERN_ERR
			"KVM: Can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

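	/*
	 * Fold the SLB key bit for the current privilege level (Kp in
	 * problem state, Ks otherwise) into the PP value checked below.
	 */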
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
		key = 4;

	for (i = 0; i < 16; i += 2) {
		u64 pte0 = be64_to_cpu(pteg[i]);
		u64 pte1 = be64_to_cpu(pteg[i + 1]);

		/* Check all relevant fields of 1st dword */
		if ((pte0 & v_mask) == v_val) {
			/* If large page bit is set, check pgsize encoding */
			if (slbe->large &&
			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
				pgsize = decode_pagesize(slbe, pte1);
				if (pgsize < 0)
					continue;
			}
			found = true;
			break;
		}
	}

	if (!found) {
		if (second)
			goto no_page_found;
		v_val |= HPTE_V_SECONDARY;
		second = true;
		goto do_second;
	}

	r = be64_to_cpu(pteg[i + 1]);
	pp = (r & HPTE_R_PP) | key;
	if (r & HPTE_R_PP0)
		pp |= 8;

	gpte->eaddr = eaddr;
	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
	gpte->page_size = pgsize;
	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
	if (unlikely(vcpu->arch.disable_kernel_nx) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR))
		gpte->may_execute = true;
	gpte->may_read = false;
	gpte->may_write = false;
	gpte->wimg = r & HPTE_R_WIMG;

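	/*
	 * PP/key decoding: values 0-2 and 6 grant read/write, values
	 * 3, 5, 7 and 10 are read-only, everything else is no access.
	 */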
	switch (pp) {
	case 0:
	case 1:
	case 2:
	case 6:
		gpte->may_write = true;
		fallthrough;
	case 3:
	case 5:
	case 7:
	case 10:
		gpte->may_read = true;
		break;
	}

	dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
		"-> 0x%lx\n",
		eaddr, avpn, gpte->vpage, gpte->raddr);

	/*
	 * Update PTE R and C bits, so the guest's swapper knows we used the
	 * page.
	 */
	if (gpte->may_read && !(r & HPTE_R_R)) {
		/*
		 * Set the accessed flag.
		 * We have to write this back with a single byte write
		 * because another vcpu may be accessing this on
		 * non-PAPR platforms such as mac99, and this is
		 * what real hardware does.
		 */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
		r |= HPTE_R_R;
		put_user(r >> 8, addr + 6);
	}
	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
		/* Set the dirty flag */
		/* Use a single byte write */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
		r |= HPTE_R_C;
		put_user(r, addr + 7);
	}

	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

	if (!gpte->may_read || (iswrite && !gpte->may_write))
		return -EPERM;
	return 0;

no_page_found:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}

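/*
 * Emulate slbmte: decode the RB/RS register pair into a guest SLB
 * entry and map the new segment on the host.
 */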
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	if (slb_nr >= vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid = slbe->tb ? esid_1t : esid;
	slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->base_page_size = MMU_PAGE_4K;
	if (slbe->large) {
		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
			switch (rs & SLB_VSID_LP) {
			case SLB_VSID_LP_00:
				slbe->base_page_size = MMU_PAGE_16M;
				break;
			case SLB_VSID_LP_01:
				slbe->base_page_size = MMU_PAGE_64K;
				break;
			}
		} else
			slbe->base_page_size = MMU_PAGE_16M;
	}

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
				       ulong *ret_slb)
{
	struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);

	if (slbe) {
		*ret_slb = slbe->origv;
		return 0;
	}
	*ret_slb = 0;
	return -ENOENT;
}

static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}

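/*
 * Emulate slbie: invalidate the guest SLB entry that maps ea and
 * flush the corresponding shadow segment on the host.
 */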
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;
	u64 seg_size;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;
	slbe->orige = 0;
	slbe->origv = 0;

	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (kvmppc_get_msr(vcpu) & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}

static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000 || 0b0	VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;
	unsigned long i;
	struct kvm_vcpu *v;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	/*
	 * The tlbie instruction changed behaviour starting with
	 * POWER6. POWER6 and later don't have the large page flag
	 * in the instruction but in the RB value, along with bits
	 * indicating page and segment sizes.
	 */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
		/* POWER6 or later */
		if (va & 1) {		/* L bit */
			if ((va & 0xf000) == 0x1000)
				mask = 0xFFFFFFFF0ULL;	/* 64k page */
			else
				mask = 0xFFFFFF000ULL;	/* 16M page */
		}
	} else {
		/* older processors, e.g. PPC970 */
		if (large)
			mask = 0xFFFFFF000ULL;
	}
	/* flush this VA on all vcpus */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
		(mp_ea >> SID_SHIFT) == esid;
}
#endif

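/*
 * Translate a guest ESID to the VSID used by the shadow MMU. The
 * translation mode (real mode, IR, DR) and privilege level are
 * encoded into the VSID so that each mode gets its own shadow
 * segments.
 */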
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;
	int pagesize = MMU_PAGE_64K;
	u64 msr = kvmppc_get_msr(vcpu);

	if (msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
			pagesize = slb->base_page_size;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
				gvsid |= VSID_1T;
			}
		}
	}

	switch (msr & (MSR_DR|MSR_IR)) {
	case 0:
		gvsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		gvsid |= VSID_REAL_IR;
		break;
	case MSR_DR:
		gvsid |= VSID_REAL_DR;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;

		break;
	default:
		BUG();
		break;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Mark this as a 64k segment if the host is using
	 * 64k pages, the host MMU supports 64k pages and
	 * the guest segment page size is >= 64k,
	 * but not if this segment contains the magic page.
	 */
	if (pagesize >= MMU_PAGE_64K &&
	    mmu_psize_defs[MMU_PAGE_64K].shift &&
	    !segment_contains_magic_page(vcpu, esid))
		gvsid |= VSID_64K;
#endif

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	*vsid = gvsid;
	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}

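/* Wire up the MMU emulation callbacks for a Book3S 64-bit guest. */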
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}