// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/* Return codes for vtop functions */
#define VTOP_SUCCESS	0
#define VTOP_INVALID	-1
#define VTOP_RETRY	-2


/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_lock.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = vma_lookup(current->mm, vaddr);
	if (vma && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_lock locked for read and the GTS locked.
 *	- NULL if vaddr is invalid or is not a valid GSEG vaddr.
 */

static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	mmap_read_lock(mm);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		mmap_read_unlock(mm);
	return gts;
}

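/*
 * Same as gru_find_lock_gts() but allocates the thread state if it does
 * not already exist. On success the gts is locked and the mmap_lock is
 * held for read; on failure an ERR_PTR() is returned.
 */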
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	mmap_write_lock(mm);
	vma = gru_find_vma(vaddr);
	if (!vma)
		goto err;

	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (IS_ERR(gts))
		goto err;
	mutex_lock(&gts->ts_ctxlock);
	mmap_write_downgrade(mm);
	return gts;

err:
	mmap_write_unlock(mm);
	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	mmap_read_unlock(current->mm);
}

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu;
 * only that cpu will access its fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
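	/*
	 * Test each word before using the (more expensive) atomic xchg to
	 * clear it. The GRU may set additional bits at any time, so only
	 * the value returned by xchg counts as cleared.
	 */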
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_lock is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	p4dp = p4d_offset(pgdp, vaddr);
	if (unlikely(p4d_none(*p4dp)))
		goto err;

	pudp = pud_offset(p4dp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
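	/* A large pmd maps the page directly; use the pmd itself as the pte */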
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = ptep_get((pte_t *)pmdp);
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}

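/*
 * Convert a user vaddr into a GRU global physical address (gpa) and page
 * size. The fast atomic lookup is tried first; in non-atomic context a
 * failed atomic lookup falls back to the non-atomic lookup. Returns one
 * of the VTOP_* codes defined above.
 */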
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}


/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;		/* make CL dirty */
		gru_flush_cache(cbe);
	}
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
			struct gru_thread_state *gts, int atomic,
			unsigned long fault_vaddr, int asid, int write,
			unsigned char tlb_preload_count,
			struct gru_tlb_fault_handle *tfh,
			struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

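	/*
	 * Compute the address of the last byte of the bcopy operand that
	 * begins at the faulting base address.
	 */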
	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 * Input:
 *	cb    Address of user CBR. Null if not running in user context
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 *
 */
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/*
	 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
	 * is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;

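	/*
	 * First fault for this page size in the context: record it and
	 * update the CCH. If the CCH cannot be updated here, force a CCH
	 * reload and retry the fault in user context.
	 */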
	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}

	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
		indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
	/* Atomic failure; switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with linux
 * interrupt handlers.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	struct completion *cmp;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		STAT(intr_cbr);
		cmp = gru->gs_blade->bs_async_wq;
		if (cmp)
			complete(cmp);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		STAT(intr_tfh);
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/* Spurious interrupts can cause this. Ignore. */
		if (!gts) {
			STAT(intr_spurious);
			continue;
		}

		/*
		 * This is running in interrupt context. Trylock the mmap_lock.
		 * If it fails, retry the fault in user context.
		 */
		gts->ustats.fmm_tlbmiss++;
		if (!gts->ts_force_cch_reload &&
		    mmap_read_trylock(gts->ts_mm)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			mmap_read_unlock(gts->ts_mm);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

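	/*
	 * Blades with cpus handle their own GRU interrupts; only cpu-less
	 * blades need to be scanned here.
	 */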
	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}


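/*
 * Retry a TLB dropin from user (non-atomic) context. Wait until no range
 * invalidate is active, then retry the dropin until it either succeeds
 * or fails with an error.
 */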
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbmiss++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

again:
	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

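	/*
	 * If the context is no longer placed on the blade/chiplet the user
	 * requested, unload it; it will be reloaded in the right place on
	 * the retry.
	 */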
	if (gru_check_context_placement(gts)) {
		gru_unlock_gts(gts);
		gru_unload_context(gts, 1);
		goto again;
	}

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have current data */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
		"exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
		/* Select blade/chiplet for GRU context */
		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1])) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			if (gru_check_context_placement(gts)) {
				gru_unlock_gts(gts);
				gru_unload_context(gts, 1);
				return ret;
			}
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;
		break;
	default:
		ret = -EINVAL;
	}
	gru_unlock_gts(gts);

	return ret;
}