1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2013 Red Hat |
4 | * Author: Rob Clark <robdclark@gmail.com> |
5 | */ |
6 | |
7 | #include <linux/dma-map-ops.h> |
8 | #include <linux/vmalloc.h> |
9 | #include <linux/spinlock.h> |
10 | #include <linux/shmem_fs.h> |
11 | #include <linux/dma-buf.h> |
12 | #include <linux/pfn_t.h> |
13 | |
14 | #include <drm/drm_prime.h> |
15 | |
16 | #include "msm_drv.h" |
17 | #include "msm_fence.h" |
18 | #include "msm_gem.h" |
19 | #include "msm_gpu.h" |
20 | #include "msm_mmu.h" |
21 | |
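/* Physical address of the object's backing store in the VRAM carveout: */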
22 | static dma_addr_t physaddr(struct drm_gem_object *obj) |
23 | { |
24 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
25 | struct msm_drm_private *priv = obj->dev->dev_private; |
26 | return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + |
27 | priv->vram.paddr; |
28 | } |
29 | |
30 | static bool use_pages(struct drm_gem_object *obj) |
31 | { |
32 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
33 | return !msm_obj->vram_node; |
34 | } |
35 | |
36 | /* |
37 | * Cache sync.. this is a bit over-complicated, to fit dma-mapping |
38 | * API. Really GPU cache is out of scope here (handled on cmdstream) |
39 | * and all we need to do is invalidate newly allocated pages before |
40 | * mapping to CPU as uncached/writecombine. |
41 | * |
 * On top of this, we have the added headache that, depending on
43 | * display generation, the display's iommu may be wired up to either |
44 | * the toplevel drm device (mdss), or to the mdp sub-node, meaning |
45 | * that here we either have dma-direct or iommu ops. |
46 | * |
 * Let this be a cautionary tale of abstraction gone wrong.
48 | */ |
49 | |
50 | static void sync_for_device(struct msm_gem_object *msm_obj) |
51 | { |
52 | struct device *dev = msm_obj->base.dev->dev; |
53 | |
	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
55 | } |
56 | |
57 | static void sync_for_cpu(struct msm_gem_object *msm_obj) |
58 | { |
59 | struct device *dev = msm_obj->base.dev->dev; |
60 | |
	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
62 | } |
63 | |
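/*
 * Move the object onto the LRU list matching its current state (pinned,
 * willneed, or dontneed).  Caller must hold priv->lru.lock.
 */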
64 | static void update_lru_active(struct drm_gem_object *obj) |
65 | { |
66 | struct msm_drm_private *priv = obj->dev->dev_private; |
67 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
68 | |
69 | GEM_WARN_ON(!msm_obj->pages); |
70 | |
	if (msm_obj->pin_count) {
		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
	}
80 | } |
81 | |
82 | static void update_lru_locked(struct drm_gem_object *obj) |
83 | { |
84 | struct msm_drm_private *priv = obj->dev->dev_private; |
85 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
86 | |
	msm_gem_assert_locked(&msm_obj->base);
88 | |
89 | if (!msm_obj->pages) { |
90 | GEM_WARN_ON(msm_obj->pin_count); |
91 | |
		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
93 | } else { |
94 | update_lru_active(obj); |
95 | } |
96 | } |
97 | |
98 | static void update_lru(struct drm_gem_object *obj) |
99 | { |
100 | struct msm_drm_private *priv = obj->dev->dev_private; |
101 | |
102 | mutex_lock(&priv->lru.lock); |
103 | update_lru_locked(obj); |
	mutex_unlock(&priv->lru.lock);
105 | } |
106 | |
107 | /* allocate pages from VRAM carveout, used when no IOMMU: */ |
108 | static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) |
109 | { |
110 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
111 | struct msm_drm_private *priv = obj->dev->dev_private; |
112 | dma_addr_t paddr; |
113 | struct page **p; |
114 | int ret, i; |
115 | |
	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
126 | } |
127 | |
128 | paddr = physaddr(obj); |
129 | for (i = 0; i < npages; i++) { |
130 | p[i] = pfn_to_page(__phys_to_pfn(paddr)); |
131 | paddr += PAGE_SIZE; |
132 | } |
133 | |
134 | return p; |
135 | } |
136 | |
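/*
 * Lazily allocate the backing pages and sg_table on first use.  For
 * write-combine buffers the new pages are also synced for the device so
 * that no stale cache lines are visible to the GPU or display.
 */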
137 | static struct page **get_pages(struct drm_gem_object *obj) |
138 | { |
139 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
140 | |
141 | msm_gem_assert_locked(obj); |
142 | |
143 | if (!msm_obj->pages) { |
144 | struct drm_device *dev = obj->dev; |
145 | struct page **p; |
146 | int npages = obj->size >> PAGE_SHIFT; |
147 | |
148 | if (use_pages(obj)) |
149 | p = drm_gem_get_pages(obj); |
150 | else |
151 | p = get_pages_vram(obj, npages); |
152 | |
		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
168 | } |
169 | |
170 | /* For non-cached buffers, ensure the new pages are clean |
171 | * because display controller, GPU, etc. are not coherent: |
172 | */ |
173 | if (msm_obj->flags & MSM_BO_WC) |
174 | sync_for_device(msm_obj); |
175 | |
176 | update_lru(obj); |
177 | } |
178 | |
179 | return msm_obj->pages; |
180 | } |
181 | |
182 | static void put_pages_vram(struct drm_gem_object *obj) |
183 | { |
184 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
185 | struct msm_drm_private *priv = obj->dev->dev_private; |
186 | |
	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
192 | } |
193 | |
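/* Release the sg_table and backing pages (if any) and update the LRU: */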
194 | static void put_pages(struct drm_gem_object *obj) |
195 | { |
196 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
197 | |
198 | if (msm_obj->pages) { |
199 | if (msm_obj->sgt) { |
200 | /* For non-cached buffers, ensure the new |
201 | * pages are clean because display controller, |
202 | * GPU, etc. are not coherent: |
203 | */ |
204 | if (msm_obj->flags & MSM_BO_WC) |
205 | sync_for_cpu(msm_obj); |
206 | |
207 | sg_free_table(msm_obj->sgt); |
			kfree(msm_obj->sgt);
209 | msm_obj->sgt = NULL; |
210 | } |
211 | |
212 | if (use_pages(obj)) |
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
214 | else |
215 | put_pages_vram(obj); |
216 | |
217 | msm_obj->pages = NULL; |
218 | update_lru(obj); |
219 | } |
220 | } |
221 | |
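/*
 * Return the backing pages, but refuse with -EBUSY if the object's
 * madvise state is "worse" than @madv (e.g. DONTNEED or already purged
 * when the caller expects WILLNEED).
 */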
222 | static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj, |
223 | unsigned madv) |
224 | { |
225 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
226 | |
227 | msm_gem_assert_locked(obj); |
228 | |
229 | if (msm_obj->madv > madv) { |
		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
233 | } |
234 | |
235 | return get_pages(obj); |
236 | } |
237 | |
238 | /* |
 * Update the pin count of the object; must be called with lru.lock held.
240 | */ |
241 | void msm_gem_pin_obj_locked(struct drm_gem_object *obj) |
242 | { |
243 | struct msm_drm_private *priv = obj->dev->dev_private; |
244 | |
245 | msm_gem_assert_locked(obj); |
246 | |
247 | to_msm_bo(obj)->pin_count++; |
	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
249 | } |
250 | |
251 | static void pin_obj_locked(struct drm_gem_object *obj) |
252 | { |
253 | struct msm_drm_private *priv = obj->dev->dev_private; |
254 | |
255 | mutex_lock(&priv->lru.lock); |
256 | msm_gem_pin_obj_locked(obj); |
	mutex_unlock(&priv->lru.lock);
258 | } |
259 | |
260 | struct page **msm_gem_pin_pages(struct drm_gem_object *obj) |
261 | { |
262 | struct page **p; |
263 | |
264 | msm_gem_lock(obj); |
265 | p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED); |
	if (!IS_ERR(p))
267 | pin_obj_locked(obj); |
268 | msm_gem_unlock(obj); |
269 | |
270 | return p; |
271 | } |
272 | |
273 | void msm_gem_unpin_pages(struct drm_gem_object *obj) |
274 | { |
275 | msm_gem_lock(obj); |
276 | msm_gem_unpin_locked(obj); |
277 | msm_gem_unlock(obj); |
278 | } |
279 | |
280 | static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot) |
281 | { |
282 | if (msm_obj->flags & MSM_BO_WC) |
283 | return pgprot_writecombine(prot); |
284 | return prot; |
285 | } |
286 | |
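/*
 * Fault handler for userspace mmap: make sure backing pages are
 * allocated and insert the pfn of the faulting page into the vma.
 */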
287 | static vm_fault_t msm_gem_fault(struct vm_fault *vmf) |
288 | { |
289 | struct vm_area_struct *vma = vmf->vma; |
290 | struct drm_gem_object *obj = vma->vm_private_data; |
291 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
292 | struct page **pages; |
293 | unsigned long pfn; |
294 | pgoff_t pgoff; |
295 | int err; |
296 | vm_fault_t ret; |
297 | |
298 | /* |
299 | * vm_ops.open/drm_gem_mmap_obj and close get and put |
	 * a reference on obj. So, we don't need to hold one here.
301 | */ |
302 | err = msm_gem_lock_interruptible(obj); |
303 | if (err) { |
304 | ret = VM_FAULT_NOPAGE; |
305 | goto out; |
306 | } |
307 | |
308 | if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { |
309 | msm_gem_unlock(obj); |
310 | return VM_FAULT_SIGBUS; |
311 | } |
312 | |
313 | /* make sure we have pages attached now */ |
314 | pages = get_pages(obj); |
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
317 | goto out_unlock; |
318 | } |
319 | |
320 | /* We don't use vmf->pgoff since that has the fake offset: */ |
321 | pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; |
322 | |
323 | pfn = page_to_pfn(pages[pgoff]); |
324 | |
	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);
327 | |
	ret = vmf_insert_pfn(vma, vmf->address, pfn);
329 | |
330 | out_unlock: |
331 | msm_gem_unlock(obj); |
332 | out: |
333 | return ret; |
334 | } |
335 | |
336 | /** get mmap offset */ |
337 | static uint64_t mmap_offset(struct drm_gem_object *obj) |
338 | { |
339 | struct drm_device *dev = obj->dev; |
340 | int ret; |
341 | |
342 | msm_gem_assert_locked(obj); |
343 | |
344 | /* Make it mmapable */ |
345 | ret = drm_gem_create_mmap_offset(obj); |
346 | |
347 | if (ret) { |
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
349 | return 0; |
350 | } |
351 | |
	return drm_vma_node_offset_addr(&obj->vma_node);
353 | } |
354 | |
355 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) |
356 | { |
357 | uint64_t offset; |
358 | |
359 | msm_gem_lock(obj); |
360 | offset = mmap_offset(obj); |
361 | msm_gem_unlock(obj); |
362 | return offset; |
363 | } |
364 | |
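/* Allocate a new vma for the given address space and track it on the object: */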
365 | static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, |
366 | struct msm_gem_address_space *aspace) |
367 | { |
368 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
369 | struct msm_gem_vma *vma; |
370 | |
371 | msm_gem_assert_locked(obj); |
372 | |
373 | vma = msm_gem_vma_new(aspace); |
374 | if (!vma) |
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vma->list, &msm_obj->vmas);
378 | |
379 | return vma; |
380 | } |
381 | |
382 | static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, |
383 | struct msm_gem_address_space *aspace) |
384 | { |
385 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
386 | struct msm_gem_vma *vma; |
387 | |
388 | msm_gem_assert_locked(obj); |
389 | |
390 | list_for_each_entry(vma, &msm_obj->vmas, list) { |
391 | if (vma->aspace == aspace) |
392 | return vma; |
393 | } |
394 | |
395 | return NULL; |
396 | } |
397 | |
398 | static void del_vma(struct msm_gem_vma *vma) |
399 | { |
400 | if (!vma) |
401 | return; |
402 | |
	list_del(&vma->list);
	kfree(vma);
405 | } |
406 | |
407 | /* |
408 | * If close is true, this also closes the VMA (releasing the allocated |
409 | * iova range) in addition to removing the iommu mapping. In the eviction |
410 | * case (!close), we keep the iova allocated, but only remove the iommu |
411 | * mapping. |
412 | */ |
413 | static void |
414 | put_iova_spaces(struct drm_gem_object *obj, bool close) |
415 | { |
416 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
417 | struct msm_gem_vma *vma; |
418 | |
419 | msm_gem_assert_locked(obj); |
420 | |
421 | list_for_each_entry(vma, &msm_obj->vmas, list) { |
422 | if (vma->aspace) { |
423 | msm_gem_vma_purge(vma); |
424 | if (close) |
425 | msm_gem_vma_close(vma); |
426 | } |
427 | } |
428 | } |
429 | |
430 | /* Called with msm_obj locked */ |
431 | static void |
432 | put_iova_vmas(struct drm_gem_object *obj) |
433 | { |
434 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
435 | struct msm_gem_vma *vma, *tmp; |
436 | |
437 | msm_gem_assert_locked(obj); |
438 | |
439 | list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { |
440 | del_vma(vma); |
441 | } |
442 | } |
443 | |
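/*
 * Look up the object's vma for the given address space, creating and
 * initializing one (with an iova allocated from [range_start, range_end))
 * if it does not exist yet.
 */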
444 | static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, |
445 | struct msm_gem_address_space *aspace, |
446 | u64 range_start, u64 range_end) |
447 | { |
448 | struct msm_gem_vma *vma; |
449 | |
450 | msm_gem_assert_locked(obj); |
451 | |
452 | vma = lookup_vma(obj, aspace); |
453 | |
454 | if (!vma) { |
455 | int ret; |
456 | |
457 | vma = add_vma(obj, aspace); |
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
466 | } |
467 | } else { |
468 | GEM_WARN_ON(vma->iova < range_start); |
469 | GEM_WARN_ON((vma->iova + obj->size) > range_end); |
470 | } |
471 | |
472 | return vma; |
473 | } |
474 | |
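/*
 * Pin the backing pages and map them into the vma, with IOMMU prot flags
 * derived from the buffer flags (read-only, privileged, cached-coherent).
 */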
475 | int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) |
476 | { |
477 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
478 | struct page **pages; |
479 | int prot = IOMMU_READ; |
480 | |
481 | if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) |
482 | prot |= IOMMU_WRITE; |
483 | |
484 | if (msm_obj->flags & MSM_BO_MAP_PRIV) |
485 | prot |= IOMMU_PRIV; |
486 | |
487 | if (msm_obj->flags & MSM_BO_CACHED_COHERENT) |
488 | prot |= IOMMU_CACHE; |
489 | |
490 | msm_gem_assert_locked(obj); |
491 | |
492 | pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED); |
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
497 | } |
498 | |
499 | void msm_gem_unpin_locked(struct drm_gem_object *obj) |
500 | { |
501 | struct msm_drm_private *priv = obj->dev->dev_private; |
502 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
503 | |
504 | msm_gem_assert_locked(obj); |
505 | |
506 | mutex_lock(&priv->lru.lock); |
507 | msm_obj->pin_count--; |
508 | GEM_WARN_ON(msm_obj->pin_count < 0); |
509 | update_lru_locked(obj); |
	mutex_unlock(&priv->lru.lock);
511 | } |
512 | |
/* Special unpin path for use in fence-signaling path, avoiding the need
 * to hold the obj lock by only depending on things that are protected by
 * the LRU lock.  In particular we know that we already have backing
 * and that the object's dma_resv has the fence for the current
 * submit/job, which will prevent us racing against page eviction.
 */
519 | void msm_gem_unpin_active(struct drm_gem_object *obj) |
520 | { |
521 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
522 | |
523 | msm_obj->pin_count--; |
524 | GEM_WARN_ON(msm_obj->pin_count < 0); |
525 | update_lru_active(obj); |
526 | } |
527 | |
528 | struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, |
529 | struct msm_gem_address_space *aspace) |
530 | { |
	return get_vma_locked(obj, aspace, 0, U64_MAX);
532 | } |
533 | |
534 | static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, |
535 | struct msm_gem_address_space *aspace, uint64_t *iova, |
536 | u64 range_start, u64 range_end) |
537 | { |
538 | struct msm_gem_vma *vma; |
539 | int ret; |
540 | |
541 | msm_gem_assert_locked(obj); |
542 | |
543 | vma = get_vma_locked(obj, aspace, range_start, range_end); |
	if (IS_ERR(vma))
		return PTR_ERR(vma);
546 | |
547 | ret = msm_gem_pin_vma_locked(obj, vma); |
548 | if (!ret) { |
549 | *iova = vma->iova; |
550 | pin_obj_locked(obj); |
551 | } |
552 | |
553 | return ret; |
554 | } |
555 | |
556 | /* |
 * Get the iova and pin it. Should have a matching put.
 * Limits the iova to the specified range (in pages).
559 | */ |
560 | int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, |
561 | struct msm_gem_address_space *aspace, uint64_t *iova, |
562 | u64 range_start, u64 range_end) |
563 | { |
564 | int ret; |
565 | |
566 | msm_gem_lock(obj); |
567 | ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end); |
568 | msm_gem_unlock(obj); |
569 | |
570 | return ret; |
571 | } |
572 | |
573 | /* get iova and pin it. Should have a matching put */ |
574 | int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, |
575 | struct msm_gem_address_space *aspace, uint64_t *iova) |
576 | { |
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
578 | } |
579 | |
580 | /* |
581 | * Get an iova but don't pin it. Doesn't need a put because iovas are currently |
582 | * valid for the life of the object |
583 | */ |
584 | int msm_gem_get_iova(struct drm_gem_object *obj, |
585 | struct msm_gem_address_space *aspace, uint64_t *iova) |
586 | { |
587 | struct msm_gem_vma *vma; |
588 | int ret = 0; |
589 | |
590 | msm_gem_lock(obj); |
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
594 | } else { |
595 | *iova = vma->iova; |
596 | } |
597 | msm_gem_unlock(obj); |
598 | |
599 | return ret; |
600 | } |
601 | |
602 | static int clear_iova(struct drm_gem_object *obj, |
603 | struct msm_gem_address_space *aspace) |
604 | { |
605 | struct msm_gem_vma *vma = lookup_vma(obj, aspace); |
606 | |
607 | if (!vma) |
608 | return 0; |
609 | |
610 | msm_gem_vma_purge(vma); |
611 | msm_gem_vma_close(vma); |
612 | del_vma(vma); |
613 | |
614 | return 0; |
615 | } |
616 | |
617 | /* |
618 | * Get the requested iova but don't pin it. Fails if the requested iova is |
619 | * not available. Doesn't need a put because iovas are currently valid for |
620 | * the life of the object. |
621 | * |
622 | * Setting an iova of zero will clear the vma. |
623 | */ |
624 | int msm_gem_set_iova(struct drm_gem_object *obj, |
625 | struct msm_gem_address_space *aspace, uint64_t iova) |
626 | { |
627 | int ret = 0; |
628 | |
629 | msm_gem_lock(obj); |
630 | if (!iova) { |
631 | ret = clear_iova(obj, aspace); |
632 | } else { |
633 | struct msm_gem_vma *vma; |
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
637 | } else if (GEM_WARN_ON(vma->iova != iova)) { |
638 | clear_iova(obj, aspace); |
639 | ret = -EBUSY; |
640 | } |
641 | } |
642 | msm_gem_unlock(obj); |
643 | |
644 | return ret; |
645 | } |
646 | |
647 | /* |
 * Unpin an iova by updating the reference counts. The memory isn't actually
649 | * purged until something else (shrinker, mm_notifier, destroy, etc) decides |
650 | * to get rid of it |
651 | */ |
652 | void msm_gem_unpin_iova(struct drm_gem_object *obj, |
653 | struct msm_gem_address_space *aspace) |
654 | { |
655 | struct msm_gem_vma *vma; |
656 | |
657 | msm_gem_lock(obj); |
658 | vma = lookup_vma(obj, aspace); |
659 | if (!GEM_WARN_ON(!vma)) { |
660 | msm_gem_unpin_locked(obj); |
661 | } |
662 | msm_gem_unlock(obj); |
663 | } |
664 | |
665 | int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, |
666 | struct drm_mode_create_dumb *args) |
667 | { |
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
672 | } |
673 | |
674 | int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
675 | uint32_t handle, uint64_t *offset) |
676 | { |
677 | struct drm_gem_object *obj; |
678 | int ret = 0; |
679 | |
680 | /* GEM does all our handle to object mapping */ |
	obj = drm_gem_object_lookup(file, handle);
682 | if (obj == NULL) { |
683 | ret = -ENOENT; |
684 | goto fail; |
685 | } |
686 | |
687 | *offset = msm_gem_mmap_offset(obj); |
688 | |
689 | drm_gem_object_put(obj); |
690 | |
691 | fail: |
692 | return ret; |
693 | } |
694 | |
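/*
 * Pin the backing pages and return a kernel mapping of the object,
 * creating the vmap() mapping on first use.
 */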
695 | static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) |
696 | { |
697 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
698 | struct page **pages; |
699 | int ret = 0; |
700 | |
701 | msm_gem_assert_locked(obj); |
702 | |
703 | if (obj->import_attach) |
		return ERR_PTR(-ENODEV);

	pages = msm_gem_pin_pages_locked(obj, madv);
	if (IS_ERR(pages))
		return ERR_CAST(pages);
709 | |
710 | pin_obj_locked(obj); |
711 | |
712 | /* increment vmap_count *before* vmap() call, so shrinker can |
713 | * check vmap_count (is_vunmapable()) outside of msm_obj lock. |
714 | * This guarantees that we won't try to msm_gem_vunmap() this |
715 | * same object from within the vmap() call (while we already |
716 | * hold msm_obj lock) |
717 | */ |
718 | msm_obj->vmap_count++; |
719 | |
720 | if (!msm_obj->vaddr) { |
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
723 | if (msm_obj->vaddr == NULL) { |
724 | ret = -ENOMEM; |
725 | goto fail; |
726 | } |
727 | } |
728 | |
729 | return msm_obj->vaddr; |
730 | |
731 | fail: |
732 | msm_obj->vmap_count--; |
733 | msm_gem_unpin_locked(obj); |
	return ERR_PTR(ret);
735 | } |
736 | |
737 | void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj) |
738 | { |
739 | return get_vaddr(obj, MSM_MADV_WILLNEED); |
740 | } |
741 | |
742 | void *msm_gem_get_vaddr(struct drm_gem_object *obj) |
743 | { |
744 | void *ret; |
745 | |
746 | msm_gem_lock(obj); |
747 | ret = msm_gem_get_vaddr_locked(obj); |
748 | msm_gem_unlock(obj); |
749 | |
750 | return ret; |
751 | } |
752 | |
753 | /* |
754 | * Don't use this! It is for the very special case of dumping |
 * submits from GPU hangs or faults, where the bo may already
756 | * be MSM_MADV_DONTNEED, but we know the buffer is still on the |
757 | * active list. |
758 | */ |
759 | void *msm_gem_get_vaddr_active(struct drm_gem_object *obj) |
760 | { |
761 | return get_vaddr(obj, __MSM_MADV_PURGED); |
762 | } |
763 | |
764 | void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) |
765 | { |
766 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
767 | |
768 | msm_gem_assert_locked(obj); |
769 | GEM_WARN_ON(msm_obj->vmap_count < 1); |
770 | |
771 | msm_obj->vmap_count--; |
772 | msm_gem_unpin_locked(obj); |
773 | } |
774 | |
775 | void msm_gem_put_vaddr(struct drm_gem_object *obj) |
776 | { |
777 | msm_gem_lock(obj); |
778 | msm_gem_put_vaddr_locked(obj); |
779 | msm_gem_unlock(obj); |
780 | } |
781 | |
782 | /* Update madvise status, returns true if not purged, else |
783 | * false or -errno. |
784 | */ |
785 | int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) |
786 | { |
787 | struct msm_drm_private *priv = obj->dev->dev_private; |
788 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
789 | |
790 | msm_gem_lock(obj); |
791 | |
792 | mutex_lock(&priv->lru.lock); |
793 | |
794 | if (msm_obj->madv != __MSM_MADV_PURGED) |
795 | msm_obj->madv = madv; |
796 | |
797 | madv = msm_obj->madv; |
798 | |
799 | /* If the obj is inactive, we might need to move it |
800 | * between inactive lists |
801 | */ |
802 | update_lru_locked(obj); |
803 | |
	mutex_unlock(&priv->lru.lock);
805 | |
806 | msm_gem_unlock(obj); |
807 | |
808 | return (madv != __MSM_MADV_PURGED); |
809 | } |
810 | |
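/*
 * Purge a DONTNEED object: tear down the iommu and kernel mappings, drop
 * the shmem backing pages, and mark the object as permanently purged.
 */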
811 | void msm_gem_purge(struct drm_gem_object *obj) |
812 | { |
813 | struct drm_device *dev = obj->dev; |
814 | struct msm_drm_private *priv = obj->dev->dev_private; |
815 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
816 | |
817 | msm_gem_assert_locked(obj); |
818 | GEM_WARN_ON(!is_purgeable(msm_obj)); |
819 | |
820 | /* Get rid of any iommu mapping(s): */ |
	put_iova_spaces(obj, true);
822 | |
823 | msm_gem_vunmap(obj); |
824 | |
	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
826 | |
827 | put_pages(obj); |
828 | |
829 | put_iova_vmas(obj); |
830 | |
831 | mutex_lock(&priv->lru.lock); |
832 | /* A one-way transition: */ |
833 | msm_obj->madv = __MSM_MADV_PURGED; |
	mutex_unlock(&priv->lru.lock);
835 | |
836 | drm_gem_free_mmap_offset(obj); |
837 | |
838 | /* Our goal here is to return as much of the memory as |
839 | * is possible back to the system as we are called from OOM. |
840 | * To do this we must instruct the shmfs to drop all of its |
841 | * backing pages, *now*. |
842 | */ |
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
847 | } |
848 | |
849 | /* |
850 | * Unpin the backing pages and make them available to be swapped out. |
851 | */ |
852 | void msm_gem_evict(struct drm_gem_object *obj) |
853 | { |
854 | struct drm_device *dev = obj->dev; |
855 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
856 | |
857 | msm_gem_assert_locked(obj); |
858 | GEM_WARN_ON(is_unevictable(msm_obj)); |
859 | |
860 | /* Get rid of any iommu mapping(s): */ |
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
864 | |
865 | put_pages(obj); |
866 | } |
867 | |
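/* Drop the kernel vmap() mapping, which must no longer have any users: */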
868 | void msm_gem_vunmap(struct drm_gem_object *obj) |
869 | { |
870 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
871 | |
872 | msm_gem_assert_locked(obj); |
873 | |
874 | if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj))) |
875 | return; |
876 | |
	vunmap(msm_obj->vaddr);
878 | msm_obj->vaddr = NULL; |
879 | } |
880 | |
881 | bool msm_gem_active(struct drm_gem_object *obj) |
882 | { |
883 | msm_gem_assert_locked(obj); |
884 | |
885 | if (to_msm_bo(obj)->pin_count) |
886 | return true; |
887 | |
	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
889 | } |
890 | |
891 | int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) |
892 | { |
893 | bool write = !!(op & MSM_PREP_WRITE); |
894 | unsigned long remain = |
895 | op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); |
896 | long ret; |
897 | |
898 | if (op & MSM_PREP_BOOST) { |
		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
				ktime_get());
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
			true, remain);
905 | if (ret == 0) |
906 | return remain == 0 ? -EBUSY : -ETIMEDOUT; |
907 | else if (ret < 0) |
908 | return ret; |
909 | |
910 | /* TODO cache maintenance */ |
911 | |
912 | return 0; |
913 | } |
914 | |
915 | int msm_gem_cpu_fini(struct drm_gem_object *obj) |
916 | { |
917 | /* TODO cache maintenance */ |
918 | return 0; |
919 | } |
920 | |
921 | #ifdef CONFIG_DEBUG_FS |
922 | void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, |
923 | struct msm_gem_stats *stats) |
924 | { |
925 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
926 | struct dma_resv *robj = obj->resv; |
927 | struct msm_gem_vma *vma; |
	uint64_t off = drm_vma_node_start(&obj->vma_node);
929 | const char *madv; |
930 | |
931 | msm_gem_lock(obj); |
932 | |
933 | stats->all.count++; |
934 | stats->all.size += obj->size; |
935 | |
936 | if (msm_gem_active(obj)) { |
937 | stats->active.count++; |
938 | stats->active.size += obj->size; |
939 | } |
940 | |
941 | if (msm_obj->pages) { |
942 | stats->resident.count++; |
943 | stats->resident.size += obj->size; |
944 | } |
945 | |
946 | switch (msm_obj->madv) { |
947 | case __MSM_MADV_PURGED: |
948 | stats->purged.count++; |
949 | stats->purged.size += obj->size; |
		madv = " purged";
951 | break; |
952 | case MSM_MADV_DONTNEED: |
953 | stats->purgeable.count++; |
954 | stats->purgeable.size += obj->size; |
		madv = " purgeable";
956 | break; |
957 | case MSM_MADV_WILLNEED: |
958 | default: |
		madv = "";
960 | break; |
961 | } |
962 | |
	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
969 | |
	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped");
			kfree(comm);
		}

		seq_puts(m, "\n");
998 | } |
999 | |
	dma_resv_describe(robj, m);
1001 | msm_gem_unlock(obj); |
1002 | } |
1003 | |
1004 | void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) |
1005 | { |
1006 | struct msm_gem_stats stats = {}; |
1007 | struct msm_gem_object *msm_obj; |
1008 | |
	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m, &stats);
1014 | } |
1015 | |
	seq_printf(m, "Total: %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active: %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
1026 | } |
1027 | #endif |
1028 | |
1029 | /* don't call directly! Use drm_gem_object_put() */ |
1030 | static void msm_gem_free_object(struct drm_gem_object *obj) |
1031 | { |
1032 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
1033 | struct drm_device *dev = obj->dev; |
1034 | struct msm_drm_private *priv = dev->dev_private; |
1035 | |
1036 | mutex_lock(&priv->obj_lock); |
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);
1039 | |
	put_iova_spaces(obj, true);
1041 | |
1042 | if (obj->import_attach) { |
1043 | GEM_WARN_ON(msm_obj->vaddr); |
1044 | |
1045 | /* Don't drop the pages for imported dmabuf, as they are not |
1046 | * ours, just free the array we allocated: |
1047 | */ |
		kvfree(msm_obj->pages);
1049 | |
1050 | put_iova_vmas(obj); |
1051 | |
		drm_prime_gem_destroy(obj, msm_obj->sgt);
1053 | } else { |
1054 | msm_gem_vunmap(obj); |
1055 | put_pages(obj); |
1056 | put_iova_vmas(obj); |
1057 | } |
1058 | |
1059 | drm_gem_object_release(obj); |
1060 | |
	kfree(msm_obj->metadata);
	kfree(msm_obj);
1063 | } |
1064 | |
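/* Set up userspace mmap of the object (PFN map, write-combined if the bo is MSM_BO_WC): */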
1065 | static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) |
1066 | { |
1067 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
1068 | |
1069 | vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); |
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1071 | |
1072 | return 0; |
1073 | } |
1074 | |
1075 | /* convenience method to construct a GEM buffer object, and userspace handle */ |
1076 | int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, |
1077 | uint32_t size, uint32_t flags, uint32_t *handle, |
1078 | char *name) |
1079 | { |
1080 | struct drm_gem_object *obj; |
1081 | int ret; |
1082 | |
1083 | obj = msm_gem_new(dev, size, flags); |
1084 | |
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);
1092 | |
1093 | /* drop reference from allocate - handle holds it now */ |
1094 | drm_gem_object_put(obj); |
1095 | |
1096 | return ret; |
1097 | } |
1098 | |
1099 | static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj) |
1100 | { |
1101 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
1102 | enum drm_gem_object_status status = 0; |
1103 | |
1104 | if (msm_obj->pages) |
1105 | status |= DRM_GEM_OBJECT_RESIDENT; |
1106 | |
1107 | if (msm_obj->madv == MSM_MADV_DONTNEED) |
1108 | status |= DRM_GEM_OBJECT_PURGEABLE; |
1109 | |
1110 | return status; |
1111 | } |
1112 | |
1113 | static const struct vm_operations_struct vm_ops = { |
1114 | .fault = msm_gem_fault, |
1115 | .open = drm_gem_vm_open, |
1116 | .close = drm_gem_vm_close, |
1117 | }; |
1118 | |
1119 | static const struct drm_gem_object_funcs msm_gem_object_funcs = { |
1120 | .free = msm_gem_free_object, |
1121 | .pin = msm_gem_prime_pin, |
1122 | .unpin = msm_gem_prime_unpin, |
1123 | .get_sg_table = msm_gem_prime_get_sg_table, |
1124 | .vmap = msm_gem_prime_vmap, |
1125 | .vunmap = msm_gem_prime_vunmap, |
1126 | .mmap = msm_gem_object_mmap, |
1127 | .status = msm_gem_status, |
1128 | .vm_ops = &vm_ops, |
1129 | }; |
1130 | |
1131 | static int msm_gem_new_impl(struct drm_device *dev, |
1132 | uint32_t size, uint32_t flags, |
1133 | struct drm_gem_object **obj) |
1134 | { |
1135 | struct msm_drm_private *priv = dev->dev_private; |
1136 | struct msm_gem_object *msm_obj; |
1137 | |
1138 | switch (flags & MSM_BO_CACHE_MASK) { |
1139 | case MSM_BO_CACHED: |
1140 | case MSM_BO_WC: |
1141 | break; |
1142 | case MSM_BO_CACHED_COHERENT: |
1143 | if (priv->has_cached_coherent) |
1144 | break; |
1145 | fallthrough; |
1146 | default: |
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
1149 | return -EINVAL; |
1150 | } |
1151 | |
	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1153 | if (!msm_obj) |
1154 | return -ENOMEM; |
1155 | |
1156 | msm_obj->flags = flags; |
1157 | msm_obj->madv = MSM_MADV_WILLNEED; |
1158 | |
	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);
1161 | |
1162 | *obj = &msm_obj->base; |
1163 | (*obj)->funcs = &msm_gem_object_funcs; |
1164 | |
1165 | return 0; |
1166 | } |
1167 | |
1168 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags) |
1169 | { |
1170 | struct msm_drm_private *priv = dev->dev_private; |
1171 | struct msm_gem_object *msm_obj; |
1172 | struct drm_gem_object *obj = NULL; |
1173 | bool use_vram = false; |
1174 | int ret; |
1175 | |
1176 | size = PAGE_ALIGN(size); |
1177 | |
1178 | if (!msm_use_mmu(dev)) |
1179 | use_vram = true; |
1180 | else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size) |
1181 | use_vram = true; |
1182 | |
1183 | if (GEM_WARN_ON(use_vram && !priv->vram.size)) |
		return ERR_PTR(-EINVAL);
1185 | |
1186 | /* Disallow zero sized objects as they make the underlying |
1187 | * infrastructure grumpy |
1188 | */ |
1189 | if (size == 0) |
		return ERR_PTR(-EINVAL);
1191 | |
	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);
1195 | |
1196 | msm_obj = to_msm_bo(obj); |
1197 | |
1198 | if (use_vram) { |
1199 | struct msm_gem_vma *vma; |
1200 | struct page **pages; |
1201 | |
1202 | drm_gem_private_object_init(dev, obj, size); |
1203 | |
1204 | msm_gem_lock(obj); |
1205 | |
1206 | vma = add_vma(obj, NULL); |
1207 | msm_gem_unlock(obj); |
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
1210 | goto fail; |
1211 | } |
1212 | |
1213 | to_msm_bo(obj)->vram_node = &vma->node; |
1214 | |
1215 | msm_gem_lock(obj); |
1216 | pages = get_pages(obj); |
1217 | msm_gem_unlock(obj); |
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
1220 | goto fail; |
1221 | } |
1222 | |
1223 | vma->iova = physaddr(obj); |
1224 | } else { |
1225 | ret = drm_gem_object_init(dev, obj, size); |
1226 | if (ret) |
1227 | goto fail; |
1228 | /* |
1229 | * Our buffers are kept pinned, so allocating them from the |
1230 | * MOVABLE zone is a really bad idea, and conflicts with CMA. |
1231 | * See comments above new_inode() why this is required _and_ |
1232 | * expected if you're going to pin these pages. |
1233 | */ |
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1235 | } |
1236 | |
	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1238 | |
1239 | mutex_lock(&priv->obj_lock); |
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);
1242 | |
1243 | ret = drm_gem_create_mmap_offset(obj); |
1244 | if (ret) |
1245 | goto fail; |
1246 | |
1247 | return obj; |
1248 | |
1249 | fail: |
1250 | drm_gem_object_put(obj); |
	return ERR_PTR(ret);
1252 | } |
1253 | |
1254 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
1255 | struct dma_buf *dmabuf, struct sg_table *sgt) |
1256 | { |
1257 | struct msm_drm_private *priv = dev->dev_private; |
1258 | struct msm_gem_object *msm_obj; |
1259 | struct drm_gem_object *obj; |
1260 | uint32_t size; |
1261 | int ret, npages; |
1262 | |
1263 | /* if we don't have IOMMU, don't bother pretending we can import: */ |
1264 | if (!msm_use_mmu(dev)) { |
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
1267 | } |
1268 | |
1269 | size = PAGE_ALIGN(dmabuf->size); |
1270 | |
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);
1274 | |
1275 | drm_gem_private_object_init(dev, obj, size); |
1276 | |
1277 | npages = size / PAGE_SIZE; |
1278 | |
1279 | msm_obj = to_msm_bo(obj); |
1280 | msm_gem_lock(obj); |
1281 | msm_obj->sgt = sgt; |
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1283 | if (!msm_obj->pages) { |
1284 | msm_gem_unlock(obj); |
1285 | ret = -ENOMEM; |
1286 | goto fail; |
1287 | } |
1288 | |
	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1290 | if (ret) { |
1291 | msm_gem_unlock(obj); |
1292 | goto fail; |
1293 | } |
1294 | |
1295 | msm_gem_unlock(obj); |
1296 | |
	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1298 | |
1299 | mutex_lock(&priv->obj_lock); |
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);
1302 | |
1303 | ret = drm_gem_create_mmap_offset(obj); |
1304 | if (ret) |
1305 | goto fail; |
1306 | |
1307 | return obj; |
1308 | |
1309 | fail: |
1310 | drm_gem_object_put(obj); |
	return ERR_PTR(ret);
1312 | } |
1313 | |
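/*
 * Convenience wrapper for kernel-internal buffers: allocate a GEM object,
 * optionally pin it at an iova in @aspace, and return a kernel mapping.
 * An illustrative call sequence, paired with msm_gem_kernel_put():
 *
 *	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */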
1314 | void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, |
1315 | uint32_t flags, struct msm_gem_address_space *aspace, |
1316 | struct drm_gem_object **bo, uint64_t *iova) |
1317 | { |
1318 | void *vaddr; |
1319 | struct drm_gem_object *obj = msm_gem_new(dev, size, flags); |
1320 | int ret; |
1321 | |
	if (IS_ERR(obj))
		return ERR_CAST(obj);
1324 | |
1325 | if (iova) { |
1326 | ret = msm_gem_get_and_pin_iova(obj, aspace, iova); |
1327 | if (ret) |
1328 | goto err; |
1329 | } |
1330 | |
1331 | vaddr = msm_gem_get_vaddr(obj); |
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
1335 | goto err; |
1336 | } |
1337 | |
1338 | if (bo) |
1339 | *bo = obj; |
1340 | |
1341 | return vaddr; |
1342 | err: |
1343 | drm_gem_object_put(obj); |
1344 | |
	return ERR_PTR(ret);
}
1348 | |
1349 | void msm_gem_kernel_put(struct drm_gem_object *bo, |
1350 | struct msm_gem_address_space *aspace) |
1351 | { |
	if (IS_ERR_OR_NULL(bo))
1353 | return; |
1354 | |
	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
1358 | } |
1359 | |
1360 | void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...) |
1361 | { |
1362 | struct msm_gem_object *msm_obj = to_msm_bo(bo); |
1363 | va_list ap; |
1364 | |
1365 | if (!fmt) |
1366 | return; |
1367 | |
1368 | va_start(ap, fmt); |
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1370 | va_end(ap); |
1371 | } |
1372 | |