/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
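
/*
 * Example (a minimal sketch, not from any real driver): a typical
 * object-creation helper built on top of drm_gem_object_init(). The
 * foo_gem_create() name and the foo_gem_funcs table are hypothetical;
 * a real driver supplies its own &drm_gem_object_funcs with at least
 * a .free callback.
 *
 *	static struct drm_gem_object *foo_gem_create(struct drm_device *dev,
 *						     size_t size)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (!obj)
 *			return ERR_PTR(-ENOMEM);
 *		obj->funcs = &foo_gem_funcs;
 *
 *		ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(obj);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return obj;
 *	}
 *
 * The size is page-aligned before the call because
 * drm_gem_private_object_init() BUG()s on non-page-aligned sizes.
 */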

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
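
/*
 * Example (sketch): for drivers whose dumb buffers are ordinary GEM objects,
 * the helper above can be plugged into &struct drm_driver directly; the
 * surrounding foo_* fields are hypothetical:
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create	 = foo_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		...
 *	};
 */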

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
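
/*
 * Example (sketch): the usual creation-ioctl pattern. Because the handle
 * holds its own reference, the creation reference is dropped once handle
 * creation has been attempted, whether or not it succeeded. foo_gem_create()
 * is the hypothetical helper from the drm_gem_object_init() example above,
 * and the args layout is made up:
 *
 *	static int foo_gem_create_ioctl(struct drm_device *dev, void *data,
 *					struct drm_file *file_priv)
 *	{
 *		struct drm_foo_gem_create *args = data;
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = foo_gem_create(dev, args->size);
 *		if (IS_ERR(obj))
 *			return PTR_ERR(obj);
 *
 *		ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *		drm_gem_object_put(obj);
 *
 *		return ret;
 *	}
 */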

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		long nr;
		folio = shmem_read_folio_gfp(mapping, i,
					     mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);
		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
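
/*
 * Example (sketch): drm_gem_get_pages() and drm_gem_put_pages() are meant to
 * be used as a pair, typically from a driver's pin/unpin or sg-table paths,
 * on an object initialized with drm_gem_object_init():
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	... access the backing store, e.g. build an sg table ...
 *
 *	drm_gem_put_pages(obj, pages, true, false);
 *
 * The dirty/accessed flags tell shmem whether the page contents must be
 * preserved across reclaim and whether to refresh the pages' position on
 * the reclaim LRU when the pin is dropped.
 */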

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
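
/*
 * Example (sketch): typical use from an execbuf-style ioctl where userspace
 * passes in an array of handles; the args fields are hypothetical:
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *
 *	... on success, use the objects; then, in either case: ...
 *
 *	if (objs) {
 *		for (i = 0; i < args->bo_count; i++) {
 *			if (objs[i])
 *				drm_gem_object_put(objs[i]);
 *		}
 *		kvfree(objs);
 *	}
 *
 * The cleanup must also run when the lookup failed: the array is published
 * through @objs_out before the handles are resolved, and a partial lookup
 * leaves the already-resolved objects referenced, with the remaining slots
 * NULL thanks to __GFP_ZERO.
 */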

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns 0 on success, -ETIME if the wait timed out, -ERESTARTSYS if
 * interrupted, or another negative error code on failure.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
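
/*
 * Example (sketch): a wait-BO ioctl built on the helper; the args layout and
 * the timeout conversion are hypothetical:
 *
 *	static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
 *				     struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait_bo *args = data;
 *		unsigned long timeout = usecs_to_jiffies(args->timeout_us);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */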

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success, or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
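
/*
 * Example: most GEM drivers do not call drm_gem_mmap() by hand but pick it up
 * through the DEFINE_DRM_GEM_FOPS() macro from <drm/drm_gem.h>, which fills
 * in a complete &file_operations for a GEM driver (the foo_* names are
 * placeholders):
 *
 *	DEFINE_DRM_GEM_FOPS(foo_driver_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_driver_fops,
 *	};
 *
 * The per-object mapping behaviour then comes from &drm_gem_object_funcs.mmap
 * or the object's vm_ops, as set up by drm_gem_mmap_obj() above.
 */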

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(obj->import_attach));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);

	return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);

void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
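
/*
 * Example (sketch): mapping an object into kernel address space around a
 * short CPU access. The &struct iosys_map abstracts whether the mapping
 * lives in system or I/O memory; "data" and "len" are assumed to exist in
 * the surrounding code:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap_unlocked(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, len);
 *
 *	drm_gem_vunmap_unlocked(obj, &map);
 *
 * The plain drm_gem_vmap()/drm_gem_vunmap() variants are for callers that
 * already hold the object's reservation lock.
 */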

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
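
/*
 * Example (sketch): typical job-submission usage of the two helpers, with
 * hypothetical fence bookkeeping in between (error unwinding elided):
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++) {
 *		ret = dma_resv_reserve_fences(objs[i]->resv, 1);
 *		if (ret)
 *			break;
 *		dma_resv_add_fence(objs[i]->resv, fence,
 *				   DMA_RESV_USAGE_WRITE);
 *	}
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 *
 * The ww_acquire_ctx is what makes locking the whole set deadlock-free
 * against other tasks acquiring an overlapping set in a different order.
 */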

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!dma_resv_trylock(obj->resv)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
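
/*
 * Example (sketch): wiring the LRU helpers into a shrinker. foo_gem_purge()
 * is a hypothetical bool (*)(struct drm_gem_object *) callback that drops an
 * object's backing store and moves it out of the scanned LRU on success;
 * foo_from_shrinker() stands in for whatever driver-specific plumbing
 * recovers the device from the shrinker:
 *
 *	static unsigned long
 *	foo_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct foo_device *foo = foo_from_shrinker(shrinker);
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&foo->lru_purgeable, sc->nr_to_scan,
 *					 &remaining, foo_gem_purge);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */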

/**
 * drm_gem_evict - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict);
