// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <drm/panfrost_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);

	if (ret) {
		/* The GPU hung, let's trigger a reset */
		panfrost_device_schedule_reset(pfdev);
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
	}

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 region_start, u64 size)
{
	u8 region_width;
	u64 region;
	u64 region_end = region_start + size;

	if (!size)
		return;

	/*
	 * The locked region is a naturally aligned power-of-2 block, encoded
	 * as log2 of the block size minus 1.
	 * Calculate the desired start/end and look for the highest bit which
	 * differs. The smallest naturally aligned block must include this bit
	 * change; the desired region starts with this bit (and subsequent bits)
	 * zeroed and ends with the bit (and subsequent bits) set to one.
	 */
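	/*
	 * Worked example (illustrative values): locking region_start =
	 * 0x2100000 with size = SZ_2M gives region_end - 1 = 0x22fffff and
	 * region_start ^ (region_end - 1) = 0x03fffff, so fls64() returns 22
	 * and region_width becomes 21, i.e. a naturally aligned 4MB (2^22)
	 * lock region; the mask below then rounds region_start down to
	 * 0x2000000.
	 */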
	region_width = max(fls64(region_start ^ (region_end - 1)),
			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;

	/*
	 * Mask off the low bits of region_start (which would be ignored by
	 * the hardware anyway)
	 */
	region_start &= GENMASK_ULL(63, region_width);

	region = region_width | region_start;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, u64 size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}

static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);
		u32 mask = BIT(as) | BIT(16 + as);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		list_move(&mmu->list, &pfdev->as_lru_list);

		if (pfdev->as_faulty_mask & mask) {
			/* Unhandled pagefault on this AS, the MMU was
			 * disabled. We need to re-enable the MMU after
			 * clearing+unmasking the AS interrupts.
			 */
			mmu_write(pfdev, MMU_INT_CLEAR, mask);
			mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
			pfdev->as_faulty_mask &= ~mask;
			panfrost_mmu_enable(pfdev, mmu);
		}

		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;
	pfdev->as_faulty_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}

static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
	/*
	 * io-pgtable only operates on multiple pages within a single table
	 * entry, so we need to split at boundaries of the table size, i.e.
	 * the next block size up. The distance from address A to the next
	 * boundary of block size B is logically B - A % B, but in unsigned
	 * two's complement where B is a power of two we get the equivalence
	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
	 */
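	/*
	 * Worked example (illustrative values): for addr = 0x2101000 the
	 * distance to the next 2MB boundary is -0x2101000 % SZ_2M = 0xff000,
	 * so we return SZ_4K and *count covers at most the 255 small pages
	 * up to that boundary; once addr is 2MB aligned (and size permits)
	 * we switch to SZ_2M blocks, capped at the next 1GB boundary.
	 */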
	size_t blk_offset = -addr % SZ_2M;

	if (blk_offset || size < SZ_2M) {
		*count = min_not_zero(blk_offset, size) / SZ_4K;
		return SZ_4K;
	}
	blk_offset = -addr % SZ_1G ?: SZ_1G;
	*count = min(blk_offset, size) / SZ_2M;
	return SZ_2M;
}

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, u64 size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_autosuspend(pfdev->dev);
}

static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgcount, mapped = 0;
			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

			ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
				       GFP_KERNEL, &mapped);
			/* Don't get stuck if things have gone wrong: force at
			 * least a page of progress even if nothing was mapped.
			 */
			mapped = max(mapped, pgsize);
			iova += mapped;
			paddr += mapped;
			len -= mapped;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_shmem_object *shmem = &bo->base;
	struct drm_gem_object *obj = &shmem->base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page, pgcount;
		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

		if (bo->is_heap)
			pgcount = 1;
		if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
			WARN_ON(unmapped_page != pgsize * pgcount);
		}
		iova += pgsize * pgcount;
		unmapped_len += pgsize * pgcount;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all = mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
};

static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:

	spin_lock(&mmu->mm_lock);

	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

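/*
 * Heap BOs are populated on demand: each page fault backs one 2MB-aligned
 * chunk of the mapping with NUM_FAULT_PAGES shmem pages, builds an sg_table
 * for that chunk, DMA-maps it and installs the corresponding GPU page table
 * entries.
 */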
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	struct drm_gem_object *obj;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	obj = &bo->base.base;

	dma_resv_lock(obj->resv, NULL);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
					  sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			ret = -ENOMEM;
			goto err_unlock;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			ret = -ENOMEM;
			goto err_unlock;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
		if (pages[page_offset]) {
			/* Pages are already mapped, bail out. */
			goto out;
		}
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		/* Can happen if the last fault only partially filled this
		 * section of the pages array before failing. In that case
		 * we skip already filled pages.
		 */
		if (pages[i])
			continue;

		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto err_unlock;
		}
	}

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_unlock;

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;
	bo->heap_rss_size += SZ_2M;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

out:
	dma_resv_unlock(obj->resv);

	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	sg_free_table(sgt);
err_unlock:
	dma_resv_unlock(obj->resv);
err_bo:
	panfrost_gem_mapping_put(bomapping);
	return ret;
}

static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}

void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}

struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}

#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
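	/*
	 * For example, a candidate range (in pages, not bytes) whose start
	 * lies within 16MB below a 4GB boundary is pushed just past that
	 * boundary, and the end is then clamped so the allocation never
	 * crosses the following 4GB boundary.
	 */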
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}

struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* A 4GB VA range is enough for now; the hardware can go up to 48 bits */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

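	/*
	 * The input/output address sizes come from the MMU_FEATURES register:
	 * the low byte holds the supported VA bits and the next byte the PA
	 * bits, hence the FIELD_GET() masks below.
	 */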
	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = SZ_4K | SZ_2M,
		.ias = FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk = pfdev->coherent,
		.tlb = &mmu_tlb_ops,
		.iommu_dev = pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      mmu);
	if (!mmu->pgtbl_ops) {
		kfree(mmu);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&mmu->refcount);

	return mmu;
}

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
		return IRQ_NONE;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int ret;

	while (status) {
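		/*
		 * MMU_INT_RAWSTAT flags each AS twice: bit 'as' reports a
		 * page fault and bit 'as + 16' a bus fault, so the mask
		 * below covers both sources for this address space.
		 */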
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		/* Page fault only (translation faults, exception types 0xC0-0xC7) */
		ret = -1;
		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

		if (ret) {
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"Reason: %s\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				as, addr,
				"TODO",
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

			spin_lock(&pfdev->as_lock);
			/* Ignore MMU interrupts on this AS until it's been
			 * re-enabled.
			 */
			pfdev->as_faulty_mask |= mask;

			/* Disable the MMU to kill jobs on this AS. */
			panfrost_mmu_disable(pfdev, as);
			spin_unlock(&pfdev->as_lock);
		}

		status &= ~mask;

		/* If we received new MMU interrupts, process them before returning. */
		if (!status)
			status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
	}

	/* Enable interrupts only if we're not about to get suspended */
	if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
		spin_lock(&pfdev->as_lock);
		mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
		spin_unlock(&pfdev->as_lock);
	}

	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err;

	pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (pfdev->mmu_irq < 0)
		return pfdev->mmu_irq;

	err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}

void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
{
	set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

	mmu_write(pfdev, MMU_INT_MASK, 0);
	synchronize_irq(pfdev->mmu_irq);
}