// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

MODULE_IMPORT_NS(DMA_BUF);

static const struct drm_gem_object_funcs ivpu_gem_funcs;

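/*
 * Imported (prime) BOs get a dedicated lock class (assigned in
 * ivpu_gem_prime_import()) so that lockdep can distinguish bo->lock taken
 * under dma-buf locks from bo->lock on driver-allocated BOs.
 */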
static struct lock_class_key prime_bo_lock_class_key;

static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
	return 0;
}

static void prime_free_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
}

static int prime_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
		return PTR_ERR(sgt);
	}

	bo->sgt = sgt;
	return 0;
}

static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
	dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops prime_ops = {
	.type = IVPU_BO_TYPE_PRIME,
	.name = "prime",
	.alloc_pages = prime_alloc_pages_locked,
	.free_pages = prime_free_pages_locked,
	.map_pages = prime_map_pages_locked,
	.unmap_pages = prime_unmap_pages_locked,
};
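
/*
 * Each BO type pairs a page provider with a DMA mapping strategy:
 *   prime    - pages belong to the exporting dma-buf (no-op alloc/free above)
 *   shmem    - pages come from shmem via drm_gem_get_pages()
 *   internal - pages are allocated directly with alloc_page()
 * The shmem and internal types below share the sgtable map/unmap helpers.
 */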

static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
	int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(pages, npages);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(pages, npages);

	bo->pages = pages;
	return 0;
}

static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);

	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
	int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;
	int ret;

	sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to allocate sgtable\n");
		return PTR_ERR(sgt);
	}

	ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
		goto err_free_sgt;
	}

	bo->sgt = sgt;
	return 0;

err_free_sgt:
	kfree(sgt);
	return ret;
}

static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops shmem_ops = {
	.type = IVPU_BO_TYPE_SHMEM,
	.name = "shmem",
	.alloc_pages = shmem_alloc_pages_locked,
	.free_pages = shmem_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
		cond_resched();
	}

	bo->pages = pages;
	return 0;

err_free_pages:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return ret;
}

static void internal_free_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);

	for (i = 0; i < npages; i++)
		put_page(bo->pages[i]);

	kvfree(bo->pages);
	bo->pages = NULL;
}

static const struct ivpu_bo_ops internal_ops = {
	.type = IVPU_BO_TYPE_INTERNAL,
	.name = "internal",
	.alloc_pages = internal_alloc_pages_locked,
	.free_pages = internal_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};
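
/*
 * The helpers below are type-agnostic: they only go through bo->ops, so they
 * work for prime, shmem and internal BOs alike.
 */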

static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	lockdep_assert_held(&bo->lock);
	drm_WARN_ON(&vdev->drm, bo->sgt);

	ret = bo->ops->alloc_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
		return ret;
	}

	ret = bo->ops->map_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
		goto err_free_pages;
	}
	return ret;

err_free_pages:
	bo->ops->free_pages(bo);
	return ret;
}

static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
	mutex_lock(&bo->lock);

	WARN_ON(!bo->sgt);
	bo->ops->unmap_pages(bo);
	WARN_ON(bo->sgt);
	bo->ops->free_pages(bo);
	WARN_ON(bo->pages);

	mutex_unlock(&bo->lock);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU address to IOMMU address.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->vpu_addr) {
		ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
			 bo->ctx->id, bo->handle);
		ret = -EINVAL;
		goto unlock;
	}

	if (!bo->sgt) {
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}
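
/*
 * ivpu_bo_pin() is idempotent: pages are allocated and the MMU mapping is
 * created only on the first call. An illustrative caller (a sketch, not
 * taken from this driver):
 *
 *	ret = ivpu_bo_pin(bo);
 *	if (ret)
 *		return ret;
 *	submit_to_vpu(bo->vpu_addr);	// hypothetical consumer
 */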

static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	if (!range) {
		if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
			range = &vdev->hw->ranges.shave;
		else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
			range = &vdev->hw->ranges.dma;
		else
			range = &vdev->hw->ranges.user;
	}

	mutex_lock(&ctx->lock);
	ret = ivpu_mmu_context_insert_node_locked(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
		list_add_tail(&bo->ctx_node, &ctx->bo_list);
	}
	mutex_unlock(&ctx->lock);

	return ret;
}

static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct ivpu_mmu_context *ctx = bo->ctx;

	ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
		 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);

	mutex_lock(&bo->lock);

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->sgt);
		ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
		bo->mmu_mapped = false;
	}

	mutex_lock(&ctx->lock);
	list_del(&bo->ctx_node);
	bo->vpu_addr = 0;
	bo->ctx = NULL;
	ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
	mutex_unlock(&ctx->lock);

	mutex_unlock(&bo->lock);
}

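/*
 * Called when an MMU context is torn down. A BO may outlive its context:
 * bo->ctx is cleared above and ivpu_bo_free() later skips the per-context
 * cleanup (see the bo->ctx checks there).
 */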
void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo, *tmp;

	list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
		ivpu_bo_free_vpu_addr(bo);
}

static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
	      u64 size, u32 flags, const struct ivpu_bo_ops *ops,
	      const struct ivpu_addr_range *range, u64 user_ptr)
{
	struct ivpu_bo *bo;
	int ret = 0;

	if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
		return ERR_PTR(-EINVAL);

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_UNCACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	bo->base.funcs = &ivpu_gem_funcs;
	bo->flags = flags;
	bo->ops = ops;
	bo->user_ptr = user_ptr;

	if (ops->type == IVPU_BO_TYPE_SHMEM)
		ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
	else
		drm_gem_private_object_init(&vdev->drm, &bo->base, size);

	if (ret) {
		ivpu_err(vdev, "Failed to initialize drm object\n");
		goto err_free;
	}

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		ret = drm_gem_create_mmap_offset(&bo->base);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate mmap offset\n");
			goto err_release;
		}
	}

	if (mmu_context) {
		ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
		if (ret) {
			ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
			goto err_release;
		}
	}

	return bo;

err_release:
	drm_gem_object_release(&bo->base);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}

static void ivpu_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	if (bo->ctx)
		ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
			 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
	else
		ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
			 (bool)bo->sgt, bo->mmu_mapped);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	vunmap(bo->kvaddr);

	if (bo->ctx)
		ivpu_bo_free_vpu_addr(bo);

	if (bo->sgt)
		ivpu_bo_unmap_and_free_pages(bo);

	if (bo->base.import_attach)
		drm_prime_gem_destroy(&bo->base, bo->sgt);

	drm_gem_object_release(&bo->base);

	mutex_destroy(&bo->lock);
	kfree(bo);
}

static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
		 bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo), bo->ops->name);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;
		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
	vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));

	return 0;
}

static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->sgt)
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);

	mutex_unlock(&bo->lock);

	if (ret)
		return ERR_PTR(ret);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}

static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset;
	struct page *page;
	vm_fault_t ret;
	int err;

	mutex_lock(&bo->lock);

	if (!bo->sgt) {
		err = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (err) {
			ret = vmf_error(err);
			goto unlock;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= npages) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = bo->pages[page_offset];
		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static const struct vm_operations_struct ivpu_vm_ops = {
	.fault = ivpu_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_bo_free,
	.mmap = ivpu_bo_mmap,
	.vm_ops = &ivpu_vm_ops,
	.get_sg_table = ivpu_bo_get_sg_table,
};

int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
	if (!ret) {
		args->vpu_addr = bo->vpu_addr;
		args->handle = bo->handle;
	}

	drm_gem_object_put(&bo->base);

	ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
		 file_priv->ctx.id, bo->vpu_addr, ivpu_bo_size(bo), bo->flags);

	return ret;
}
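
/*
 * Illustrative userspace usage (a sketch; the fd and flag choice are
 * placeholders, not taken from this file):
 *
 *	struct drm_ivpu_bo_create args = {
 *		.size = 4096,
 *		.flags = DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args) == 0)
 *		use_buffer(args.handle, args.vpu_addr);	// hypothetical helper
 */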

struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
	const struct ivpu_addr_range *range;
	struct ivpu_addr_range fixed_range;
	struct ivpu_bo *bo;
	pgprot_t prot;
	int ret;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	if (vpu_addr) {
		fixed_range.start = vpu_addr;
		fixed_range.end = vpu_addr + size;
		range = &fixed_range;
	} else {
		range = &vdev->hw->ranges.global;
	}

	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, vpu_addr, size, flags);
		return NULL;
	}

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		drm_clflush_pages(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);

	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
	bo->kvaddr = vmap(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT, VM_MAP, prot);
	if (!bo->kvaddr) {
		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
		goto err_put;
	}

	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
		 bo->vpu_addr, ivpu_bo_size(bo), flags);

	return bo;

err_put:
	drm_gem_object_put(&bo->base);
	return NULL;
}
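
/*
 * Illustrative driver-internal usage (a sketch; the size and payload are
 * placeholders, not taken from this file):
 *
 *	struct ivpu_bo *bo;
 *
 *	bo = ivpu_bo_alloc_internal(vdev, 0, SZ_1M, DRM_IVPU_BO_WC);
 *	if (!bo)
 *		return -ENOMEM;
 *	memcpy(bo->kvaddr, payload, payload_size);	// hypothetical payload
 *	...
 *	ivpu_bo_free_internal(bo);
 *
 * Note the NULL-on-error convention here, unlike the ERR_PTR() convention
 * used by ivpu_bo_alloc().
 */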

void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
	drm_gem_object_put(&bo->base);
}

struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct dma_buf_attachment *attach;
	struct ivpu_bo *bo;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);

	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
		goto err_detach;
	}

	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

	bo->base.import_attach = attach;

	return &bo->base;

err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_CAST(bo);
}

int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);

	if (!bo->ctx) {
		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
			goto unlock;
		}
	}

	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
unlock:
	mutex_unlock(&bo->lock);
	drm_gem_object_put(obj);
	return ret;
}

int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}
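
/*
 * args->timeout_ns is an absolute timeout (see drm_timeout_abs_to_jiffies()),
 * so a userspace sketch would pass "now + budget" (now_ns() is a hypothetical
 * helper, not part of the uAPI):
 *
 *	struct drm_ivpu_bo_wait args = {
 *		.handle = handle,
 *		.timeout_ns = now_ns() + 1000000000,	// 1s budget
 *	};
 *	ioctl(fd, DRM_IOCTL_IVPU_BO_WAIT, &args);
 *
 * The ioctl returns -ETIMEDOUT if the BO's fences did not signal in time.
 */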

static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	unsigned long dma_refcount = 0;

	if (bo->base.dma_buf && bo->base.dma_buf->file)
		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
		   bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo),
		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;
	struct ivpu_bo *bo;

	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");

	mutex_lock(&vdev->gctx.lock);
	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->gctx.lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		mutex_lock(&file_priv->ctx.lock);
		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
			ivpu_bo_print_info(bo, p);
		mutex_unlock(&file_priv->ctx.lock);

		ivpu_file_priv_put(&file_priv);
	}
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}