1 | /* SPDX-License-Identifier: GPL-2.0 OR MIT */ |
2 | /************************************************************************** |
3 | * |
4 | * Copyright (c) 2009-2023 VMware, Inc., Palo Alto, CA., USA |
5 | * All Rights Reserved. |
6 | * |
7 | * Permission is hereby granted, free of charge, to any person obtaining a |
8 | * copy of this software and associated documentation files (the |
9 | * "Software"), to deal in the Software without restriction, including |
10 | * without limitation the rights to use, copy, modify, merge, publish, |
11 | * distribute, sub license, and/or sell copies of the Software, and to |
12 | * permit persons to whom the Software is furnished to do so, subject to |
13 | * the following conditions: |
14 | * |
15 | * The above copyright notice and this permission notice (including the |
16 | * next paragraph) shall be included in all copies or substantial portions |
17 | * of the Software. |
18 | * |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
26 | * |
27 | **************************************************************************/ |
28 | /* |
29 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
30 | * |
31 | * While no substantial code is shared, the prime code is inspired by |
32 | * drm_prime.c, with |
33 | * Authors: |
34 | * Dave Airlie <airlied@redhat.com> |
35 | * Rob Clark <rob.clark@linaro.org> |
36 | */ |
37 | /** @file ttm_ref_object.c |
38 | * |
39 | * Base- and reference object implementation for the various |
40 | * ttm objects. Implements reference counting, minimal security checks |
41 | * and release on file close. |
42 | */ |
43 | |
44 | |
45 | #define pr_fmt(fmt) "[TTM] " fmt |
46 | |
47 | #include "ttm_object.h" |
48 | #include "vmwgfx_drv.h" |
49 | |
50 | #include <linux/list.h> |
51 | #include <linux/spinlock.h> |
52 | #include <linux/slab.h> |
53 | #include <linux/atomic.h> |
54 | #include <linux/module.h> |
55 | #include <linux/hashtable.h> |
56 | |
57 | MODULE_IMPORT_NS(DMA_BUF); |
58 | |
59 | #define VMW_TTM_OBJECT_REF_HT_ORDER 10 |
60 | |
61 | /** |
62 | * struct ttm_object_file |
63 | * |
64 | * @tdev: Pointer to the ttm_object_device. |
65 | * |
66 | * @lock: Lock that protects the ref_list list and the |
67 | * ref_hash hash tables. |
68 | * |
69 | * @ref_list: List of ttm_ref_objects to be destroyed at |
70 | * file release. |
71 | * |
 * @ref_hash: Hash table of ref objects, hashed on the base-object
 * handle, for fast lookup of ref objects given a base object.
74 | * |
75 | * @refcount: reference/usage count |
76 | */ |
77 | struct ttm_object_file { |
78 | struct ttm_object_device *tdev; |
79 | spinlock_t lock; |
80 | struct list_head ref_list; |
81 | DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER); |
82 | struct kref refcount; |
83 | }; |
84 | |
85 | /* |
86 | * struct ttm_object_device |
87 | * |
88 | * @object_lock: lock that protects idr. |
89 | * |
90 | * @object_count: Per device object count. |
91 | * |
92 | * This is the per-device data structure needed for ttm object management. |
93 | */ |
94 | |
95 | struct ttm_object_device { |
96 | spinlock_t object_lock; |
97 | atomic_t object_count; |
98 | struct dma_buf_ops ops; |
99 | void (*dmabuf_release)(struct dma_buf *dma_buf); |
100 | struct idr idr; |
101 | }; |
102 | |
103 | /* |
104 | * struct ttm_ref_object |
105 | * |
106 | * @hash: Hash entry for the per-file object reference hash. |
107 | * |
108 | * @head: List entry for the per-file list of ref-objects. |
109 | * |
110 | * @kref: Ref count. |
111 | * |
112 | * @obj: Base object this ref object is referencing. |
113 | * |
114 | * @ref_type: Type of ref object. |
115 | * |
116 | * This is similar to an idr object, but it also has a hash table entry |
117 | * that allows lookup with a pointer to the referenced object as a key. In |
118 | * that way, one can easily detect whether a base object is referenced by |
119 | * a particular ttm_object_file. It also carries a ref count to avoid creating |
120 | * multiple ref objects if a ttm_object_file references the same base |
121 | * object more than once. |
122 | */ |
123 | |
124 | struct ttm_ref_object { |
125 | struct rcu_head rcu_head; |
126 | struct vmwgfx_hash_item hash; |
127 | struct list_head head; |
128 | struct kref kref; |
129 | struct ttm_base_object *obj; |
130 | struct ttm_object_file *tfile; |
131 | }; |
132 | |
133 | static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf); |
134 | |
135 | static inline struct ttm_object_file * |
136 | ttm_object_file_ref(struct ttm_object_file *tfile) |
137 | { |
	kref_get(&tfile->refcount);
139 | return tfile; |
140 | } |
141 | |
142 | static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile, |
143 | uint64_t key, |
144 | struct vmwgfx_hash_item **p_hash) |
145 | { |
146 | struct vmwgfx_hash_item *hash; |
147 | |
148 | hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) { |
149 | if (hash->key == key) { |
150 | *p_hash = hash; |
151 | return 0; |
152 | } |
153 | } |
154 | return -EINVAL; |
155 | } |
156 | |
157 | static int ttm_tfile_find_ref(struct ttm_object_file *tfile, |
158 | uint64_t key, |
159 | struct vmwgfx_hash_item **p_hash) |
160 | { |
161 | struct vmwgfx_hash_item *hash; |
162 | |
163 | hash_for_each_possible(tfile->ref_hash, hash, head, key) { |
164 | if (hash->key == key) { |
165 | *p_hash = hash; |
166 | return 0; |
167 | } |
168 | } |
169 | return -EINVAL; |
170 | } |
171 | |
172 | static void ttm_object_file_destroy(struct kref *kref) |
173 | { |
174 | struct ttm_object_file *tfile = |
175 | container_of(kref, struct ttm_object_file, refcount); |
176 | |
	kfree(tfile);
178 | } |
179 | |
180 | |
181 | static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile) |
182 | { |
183 | struct ttm_object_file *tfile = *p_tfile; |
184 | |
185 | *p_tfile = NULL; |
	kref_put(&tfile->refcount, ttm_object_file_destroy);
187 | } |
188 | |
189 | |
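/**
 * ttm_base_object_init - Initialize a struct ttm_base_object.
 *
 * @tfile: Pointer to a struct ttm_object_file.
 * @base: The struct ttm_base_object to initialize.
 * @shareable: This object is shareable with other applications
 * (different @tfile pointers).
 * @object_type: The object type.
 * @refcount_release: Called when all references to the base object are gone.
 *
 * Allocates an idr handle for the object, takes an initial ref object on
 * behalf of @tfile and then drops the caller's reference, so that on
 * successful return the ref object holds the only reference on @base.
 */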
190 | int ttm_base_object_init(struct ttm_object_file *tfile, |
191 | struct ttm_base_object *base, |
192 | bool shareable, |
193 | enum ttm_object_type object_type, |
194 | void (*refcount_release) (struct ttm_base_object **)) |
195 | { |
196 | struct ttm_object_device *tdev = tfile->tdev; |
197 | int ret; |
198 | |
199 | base->shareable = shareable; |
200 | base->tfile = ttm_object_file_ref(tfile); |
201 | base->refcount_release = refcount_release; |
202 | base->object_type = object_type; |
	kref_init(&base->refcount);
	idr_preload(GFP_KERNEL);
	spin_lock(&tdev->object_lock);
	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
	spin_unlock(&tdev->object_lock);
	idr_preload_end();
209 | if (ret < 0) |
210 | return ret; |
211 | |
212 | base->handle = ret; |
	ret = ttm_ref_object_add(tfile, base, NULL, false);
214 | if (unlikely(ret != 0)) |
215 | goto out_err1; |
216 | |
	ttm_base_object_unref(&base);
218 | |
219 | return 0; |
220 | out_err1: |
	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);
224 | return ret; |
225 | } |
226 | |
227 | static void ttm_release_base(struct kref *kref) |
228 | { |
229 | struct ttm_base_object *base = |
230 | container_of(kref, struct ttm_base_object, refcount); |
231 | struct ttm_object_device *tdev = base->tfile->tdev; |
232 | |
	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);
236 | |
237 | /* |
238 | * Note: We don't use synchronize_rcu() here because it's far |
239 | * too slow. It's up to the user to free the object using |
240 | * call_rcu() or ttm_base_object_kfree(). |
241 | */ |
242 | |
	ttm_object_file_unref(&base->tfile);
244 | if (base->refcount_release) |
245 | base->refcount_release(&base); |
246 | } |
247 | |
248 | void ttm_base_object_unref(struct ttm_base_object **p_base) |
249 | { |
250 | struct ttm_base_object *base = *p_base; |
251 | |
252 | *p_base = NULL; |
253 | |
	kref_put(&base->refcount, ttm_release_base);
255 | } |
256 | |
257 | struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, |
258 | uint64_t key) |
259 | { |
260 | struct ttm_base_object *base = NULL; |
261 | struct vmwgfx_hash_item *hash; |
262 | int ret; |
263 | |
	spin_lock(&tfile->lock);
	ret = ttm_tfile_find_ref(tfile, key, &hash);
266 | |
267 | if (likely(ret == 0)) { |
268 | base = hlist_entry(hash, struct ttm_ref_object, hash)->obj; |
		if (!kref_get_unless_zero(&base->refcount))
270 | base = NULL; |
271 | } |
	spin_unlock(&tfile->lock);

	return base;
276 | } |
277 | |
278 | struct ttm_base_object * |
279 | ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key) |
280 | { |
281 | struct ttm_base_object *base; |
282 | |
283 | rcu_read_lock(); |
	base = idr_find(&tdev->idr, key);

	if (base && !kref_get_unless_zero(&base->refcount))
287 | base = NULL; |
288 | rcu_read_unlock(); |
289 | |
290 | return base; |
291 | } |
292 | |
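/**
 * ttm_ref_object_add - Add a ref object to a base object.
 *
 * @tfile: A struct ttm_object_file representing the application owning
 * the ref object.
 * @base: The base object to reference.
 * @existed: Upon completion, indicates that an identical reference object
 * already existed, and the refcount was upped on that object instead.
 * @require_existed: Fail with -EPERM if an identical ref object didn't
 * already exist.
 *
 * Checks that the base object is shareable and adds a ref object to it.
 * A ref object holds a single reference on the base object on behalf of
 * user-space; when the file corresponding to @tfile is released, all its
 * remaining ref objects are deleted, dropping those references.
 */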
293 | int ttm_ref_object_add(struct ttm_object_file *tfile, |
294 | struct ttm_base_object *base, |
295 | bool *existed, |
296 | bool require_existed) |
297 | { |
298 | struct ttm_ref_object *ref; |
299 | struct vmwgfx_hash_item *hash; |
300 | int ret = -EINVAL; |
301 | |
302 | if (base->tfile != tfile && !base->shareable) |
303 | return -EPERM; |
304 | |
305 | if (existed != NULL) |
306 | *existed = true; |
307 | |
308 | while (ret == -EINVAL) { |
309 | rcu_read_lock(); |
		ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);
311 | |
312 | if (ret == 0) { |
313 | ref = hlist_entry(hash, struct ttm_ref_object, hash); |
			if (kref_get_unless_zero(&ref->kref)) {
315 | rcu_read_unlock(); |
316 | break; |
317 | } |
318 | } |
319 | |
320 | rcu_read_unlock(); |
321 | if (require_existed) |
322 | return -EPERM; |
323 | |
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
325 | if (unlikely(ref == NULL)) { |
326 | return -ENOMEM; |
327 | } |
328 | |
329 | ref->hash.key = base->handle; |
330 | ref->obj = base; |
331 | ref->tfile = tfile; |
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
		ret = 0;

		list_add_tail(&ref->head, &tfile->ref_list);
		kref_get(&base->refcount);
		spin_unlock(&tfile->lock);
341 | if (existed != NULL) |
342 | *existed = false; |
343 | } |
344 | |
345 | return ret; |
346 | } |
347 | |
348 | static void __releases(tfile->lock) __acquires(tfile->lock) |
349 | ttm_ref_object_release(struct kref *kref) |
350 | { |
351 | struct ttm_ref_object *ref = |
352 | container_of(kref, struct ttm_ref_object, kref); |
353 | struct ttm_object_file *tfile = ref->tfile; |
354 | |
	hash_del_rcu(&ref->hash.head);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	ttm_base_object_unref(&ref->obj);
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
362 | } |
363 | |
364 | int ttm_ref_object_base_unref(struct ttm_object_file *tfile, |
365 | unsigned long key) |
366 | { |
367 | struct ttm_ref_object *ref; |
368 | struct vmwgfx_hash_item *hash; |
369 | int ret; |
370 | |
	spin_lock(&tfile->lock);
	ret = ttm_tfile_find_ref(tfile, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = hlist_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
380 | return 0; |
381 | } |
382 | |
383 | void ttm_object_file_release(struct ttm_object_file **p_tfile) |
384 | { |
385 | struct ttm_ref_object *ref; |
386 | struct list_head *list; |
387 | struct ttm_object_file *tfile = *p_tfile; |
388 | |
389 | *p_tfile = NULL; |
	spin_lock(&tfile->lock);
391 | |
392 | /* |
393 | * Since we release the lock within the loop, we have to |
394 | * restart it from the beginning each time. |
395 | */ |
396 | |
	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
401 | } |
402 | |
	spin_unlock(&tfile->lock);

	ttm_object_file_unref(&tfile);
406 | } |
407 | |
408 | struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev) |
409 | { |
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
411 | |
412 | if (unlikely(tfile == NULL)) |
413 | return NULL; |
414 | |
415 | spin_lock_init(&tfile->lock); |
416 | tfile->tdev = tdev; |
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);
419 | |
420 | hash_init(tfile->ref_hash); |
421 | |
422 | return tfile; |
423 | } |
424 | |
425 | struct ttm_object_device * |
426 | ttm_object_device_init(const struct dma_buf_ops *ops) |
427 | { |
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
429 | |
430 | if (unlikely(tdev == NULL)) |
431 | return NULL; |
432 | |
433 | spin_lock_init(&tdev->object_lock); |
	atomic_set(&tdev->object_count, 0);
435 | |
436 | /* |
437 | * Our base is at VMWGFX_NUM_MOB + 1 because we want to create |
438 | * a seperate namespace for GEM handles (which are |
439 | * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctl's |
440 | * can take either handle as an argument so we want to |
441 | * easily be able to tell whether the handle refers to a |
442 | * GEM buffer or a surface. |
443 | */ |
444 | idr_init_base(idr: &tdev->idr, VMWGFX_NUM_MOB + 1); |
445 | tdev->ops = *ops; |
446 | tdev->dmabuf_release = tdev->ops.release; |
447 | tdev->ops.release = ttm_prime_dmabuf_release; |
448 | return tdev; |
449 | } |
450 | |
451 | void ttm_object_device_release(struct ttm_object_device **p_tdev) |
452 | { |
453 | struct ttm_object_device *tdev = *p_tdev; |
454 | |
455 | *p_tdev = NULL; |
456 | |
457 | WARN_ON_ONCE(!idr_is_empty(&tdev->idr)); |
458 | idr_destroy(&tdev->idr); |
459 | |
	kfree(tdev);
461 | } |
462 | |
463 | /** |
464 | * get_dma_buf_unless_doomed - get a dma_buf reference if possible. |
465 | * |
466 | * @dmabuf: Non-refcounted pointer to a struct dma-buf. |
467 | * |
468 | * Obtain a file reference from a lookup structure that doesn't refcount |
469 | * the file, but synchronizes with its release method to make sure it has |
470 | * not been freed yet. See for example kref_get_unless_zero documentation. |
471 | * Returns true if refcounting succeeds, false otherwise. |
472 | * |
473 | * Nobody really wants this as a public API yet, so let it mature here |
474 | * for some time... |
475 | */ |
476 | static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf) |
477 | { |
	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
479 | } |
480 | |
481 | /** |
482 | * ttm_prime_refcount_release - refcount release method for a prime object. |
483 | * |
484 | * @p_base: Pointer to ttm_base_object pointer. |
485 | * |
 * This is a wrapper that calls the refcount_release function of the
487 | * underlying object. At the same time it cleans up the prime object. |
488 | * This function is called when all references to the base object we |
489 | * derive from are gone. |
490 | */ |
491 | static void ttm_prime_refcount_release(struct ttm_base_object **p_base) |
492 | { |
493 | struct ttm_base_object *base = *p_base; |
494 | struct ttm_prime_object *prime; |
495 | |
496 | *p_base = NULL; |
497 | prime = container_of(base, struct ttm_prime_object, base); |
498 | BUG_ON(prime->dma_buf != NULL); |
	mutex_destroy(&prime->mutex);
500 | if (prime->refcount_release) |
501 | prime->refcount_release(&base); |
502 | } |
503 | |
504 | /** |
505 | * ttm_prime_dmabuf_release - Release method for the dma-bufs we export |
506 | * |
 * @dma_buf: The struct dma_buf being released.
508 | * |
509 | * This function first calls the dma_buf release method the driver |
510 | * provides. Then it cleans up our dma_buf pointer used for lookup, |
511 | * and finally releases the reference the dma_buf has on our base |
512 | * object. |
513 | */ |
514 | static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf) |
515 | { |
516 | struct ttm_prime_object *prime = |
517 | (struct ttm_prime_object *) dma_buf->priv; |
518 | struct ttm_base_object *base = &prime->base; |
519 | struct ttm_object_device *tdev = base->tfile->tdev; |
520 | |
521 | if (tdev->dmabuf_release) |
522 | tdev->dmabuf_release(dma_buf); |
523 | mutex_lock(&prime->mutex); |
524 | if (prime->dma_buf == dma_buf) |
525 | prime->dma_buf = NULL; |
	mutex_unlock(&prime->mutex);
	ttm_base_object_unref(&base);
528 | } |
529 | |
530 | /** |
531 | * ttm_prime_fd_to_handle - Get a base object handle from a prime fd |
532 | * |
533 | * @tfile: A struct ttm_object_file identifying the caller. |
534 | * @fd: The prime / dmabuf fd. |
535 | * @handle: The returned handle. |
536 | * |
537 | * This function returns a handle to an object that previously exported |
538 | * a dma-buf. Note that we don't handle imports yet, because we simply |
539 | * have no consumers of that implementation. |
540 | */ |
541 | int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, |
542 | int fd, u32 *handle) |
543 | { |
544 | struct ttm_object_device *tdev = tfile->tdev; |
545 | struct dma_buf *dma_buf; |
546 | struct ttm_prime_object *prime; |
547 | struct ttm_base_object *base; |
548 | int ret; |
549 | |
550 | dma_buf = dma_buf_get(fd); |
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &tdev->ops) {
		dma_buf_put(dma_buf);
		return -ENOSYS;
	}
556 | |
557 | prime = (struct ttm_prime_object *) dma_buf->priv; |
558 | base = &prime->base; |
559 | *handle = base->handle; |
	ret = ttm_ref_object_add(tfile, base, NULL, false);
561 | |
	dma_buf_put(dma_buf);
563 | |
564 | return ret; |
565 | } |
566 | |
567 | /** |
568 | * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object |
569 | * |
570 | * @tfile: Struct ttm_object_file identifying the caller. |
571 | * @handle: Handle to the object we're exporting from. |
572 | * @flags: flags for dma-buf creation. We just pass them on. |
573 | * @prime_fd: The returned file descriptor. |
574 | * |
575 | */ |
576 | int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, |
577 | uint32_t handle, uint32_t flags, |
578 | int *prime_fd) |
579 | { |
580 | struct ttm_object_device *tdev = tfile->tdev; |
581 | struct ttm_base_object *base; |
582 | struct dma_buf *dma_buf; |
583 | struct ttm_prime_object *prime; |
584 | int ret; |
585 | |
	base = ttm_base_object_lookup(tfile, handle);
587 | if (unlikely(base == NULL || |
588 | base->object_type != ttm_prime_type)) { |
589 | ret = -ENOENT; |
590 | goto out_unref; |
591 | } |
592 | |
593 | prime = container_of(base, struct ttm_prime_object, base); |
594 | if (unlikely(!base->shareable)) { |
595 | ret = -EPERM; |
596 | goto out_unref; |
597 | } |
598 | |
599 | ret = mutex_lock_interruptible(&prime->mutex); |
600 | if (unlikely(ret != 0)) { |
601 | ret = -ERESTARTSYS; |
602 | goto out_unref; |
603 | } |
604 | |
605 | dma_buf = prime->dma_buf; |
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
607 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info); |
608 | exp_info.ops = &tdev->ops; |
609 | exp_info.size = prime->size; |
610 | exp_info.flags = flags; |
611 | exp_info.priv = prime; |
612 | |
613 | /* |
614 | * Need to create a new dma_buf |
615 | */ |
616 | |
		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			mutex_unlock(&prime->mutex);
621 | goto out_unref; |
622 | } |
623 | |
624 | /* |
625 | * dma_buf has taken the base object reference |
626 | */ |
627 | base = NULL; |
628 | prime->dma_buf = dma_buf; |
629 | } |
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
642 | return ret; |
643 | } |
644 | |
645 | /** |
646 | * ttm_prime_object_init - Initialize a ttm_prime_object |
647 | * |
648 | * @tfile: struct ttm_object_file identifying the caller |
649 | * @size: The size of the dma_bufs we export. |
650 | * @prime: The object to be initialized. |
651 | * @type: See ttm_base_object_init |
652 | * @refcount_release: See ttm_base_object_init |
653 | * |
654 | * Initializes an object which is compatible with the drm_prime model |
655 | * for data sharing between processes and devices. |
656 | */ |
657 | int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, |
658 | struct ttm_prime_object *prime, |
659 | enum ttm_object_type type, |
660 | void (*refcount_release) (struct ttm_base_object **)) |
661 | { |
	bool shareable = !!(type == VMW_RES_SURFACE);

	mutex_init(&prime->mutex);
664 | prime->size = PAGE_ALIGN(size); |
665 | prime->real_type = type; |
666 | prime->dma_buf = NULL; |
667 | prime->refcount_release = refcount_release; |
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release);
671 | } |
672 | |