/* SPDX-License-Identifier: GPL-2.0 OR MIT */

#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_gem.h>

struct drm_gpuvm;
struct drm_gpuvm_ops;

/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuvm.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @vm: the &drm_gpuvm this object is associated with
	 */
	struct drm_gpuvm *vm;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @addr: the start address
		 */
		u64 addr;

		/**
		 * @range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @entry: the &list_head to attach this object to a &drm_gem_object
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
	 */
	struct {
		/**
		 * @node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree. This is
		 * useful to keep iterating &drm_gpuvas from a start node found
		 * through the rb-tree while doing modifications on the rb-tree
		 * itself.
		 */
		struct list_head entry;

		/**
		 * @__subtree_last: needed by the interval tree, holding last-in-subtree
		 */
		u64 __subtree_last;
	} rb;
};

int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);

static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
				  struct drm_gem_object *obj, u64 offset)
{
	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;
}
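
/*
 * Example (illustrative sketch, not part of this header's API): how a driver
 * might create and register a mapping. The "foo" names are hypothetical;
 * drivers typically embed &drm_gpuva in a driver structure and must hold the
 * GEM's lock (e.g. its dma-resv) around drm_gpuva_link().
 *
 *	struct foo_va {
 *		struct drm_gpuva base;
 *		// driver private data
 *	};
 *
 *	static int foo_map(struct drm_gpuvm *gpuvm, struct foo_va *fva,
 *			   u64 addr, u64 range,
 *			   struct drm_gem_object *obj, u64 offset)
 *	{
 *		int ret;
 *
 *		drm_gpuva_init(&fva->base, addr, range, obj, offset);
 *
 *		ret = drm_gpuva_insert(gpuvm, &fva->base);
 *		if (ret)
 *			return ret;
 *
 *		drm_gpuva_link(&fva->base);
 *		return 0;
 *	}
 */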

/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
 * is invalidated
 * @va: the &drm_gpuva to check
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}

/**
 * struct drm_gpuvm - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space, using
 * an rb-tree based interval tree together with an ordered list of &drm_gpuva
 * mappings. Typically, this structure is embedded in bigger driver structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuvm {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuvm_ops *ops;
};

void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
		    u64 start_offset, u64 range,
		    u64 reserve_offset, u64 reserve_range,
		    const struct drm_gpuvm_ops *ops);
void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);

bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
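
/*
 * Example (illustrative sketch): initializing a GPU VA space that spans a
 * 48-bit range and reserves the first page for the kernel. The "foo" names,
 * the sizes and the ops structure are hypothetical; start, range and the
 * reserved cutout are driver policy.
 *
 *	static const struct drm_gpuvm_ops foo_gpuvm_ops = {
 *		// op_alloc/op_free and sm_step_* callbacks as needed
 *	};
 *
 *	drm_gpuvm_init(&foo->gpuvm, "foo-vm",
 *		       0, 1ull << 48,	// VA space start and length
 *		       0, SZ_4K,	// kernel reserved cutout
 *		       &foo_gpuvm_ops);
 *	...
 *	drm_gpuvm_destroy(&foo->gpuvm);
 */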

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}

/**
 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but is using the &drm_gpuvm's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
 * @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))
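
/*
 * Example (illustrative sketch): walking all mappings overlapping a requested
 * region, e.g. to validate a request against existing mappings. The req_*
 * names are hypothetical.
 *
 *	struct drm_gpuva *va;
 *	u64 addr = req_addr, end = req_addr + req_range;
 *
 *	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
 *		// va may start before addr and/or extend past end; only
 *		// mappings overlapping [addr, end) are visited.
 *		if (va->gem.obj != req_obj)
 *			return -EINVAL;
 *	}
 */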

/**
 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm.
 */
#define drm_gpuvm_for_each_va(va__, gpuvm__) \
	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)

/**
 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
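
/*
 * Example (illustrative sketch): tearing down a VM by removing every mapping.
 * The safe variant is required because each iteration removes the current
 * element. struct foo_va is the hypothetical embedding structure from the
 * sketch further above; locking is omitted.
 *
 *	struct drm_gpuva *va, *next;
 *
 *	drm_gpuvm_for_each_va_safe(va, next, gpuvm) {
 *		if (unlikely(va == &gpuvm->kernel_alloc_node))
 *			continue;	// removed by drm_gpuvm_destroy()
 *
 *		drm_gpuva_unlink(va);	// needs the GEM lock, if GEM backed
 *		drm_gpuva_remove(va);
 *		kfree(container_of(va, struct foo_va, base));
 *	}
 *	drm_gpuvm_destroy(gpuvm);
 */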

/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, adding the missing page table entries
	 * only and update the &drm_gpuvm accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existent
 * mapping(s), hence it consists of a maximum of two map and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If either a new mapping's start address is aligned with the start address
 * of the old mapping or the new mapping's end address is aligned with the
 * end address of the old mapping, either @prev or @next is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance of extracting driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};
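
/*
 * Example (illustrative sketch): deriving the region a remap actually removes
 * from the page tables. The original mapping is op->unmap->va; the parts to
 * keep are described by op->prev and op->next. A helper like this is an
 * assumption for illustration, not part of the API.
 *
 *	static void foo_remap_range(struct drm_gpuva_op_remap *op,
 *				    u64 *unmap_addr, u64 *unmap_range)
 *	{
 *		struct drm_gpuva *old = op->unmap->va;
 *		u64 addr = old->va.addr;
 *		u64 end = old->va.addr + old->va.range;
 *
 *		if (op->prev)	// keep the front part of the old mapping
 *			addr = op->prev->va.addr + op->prev->va.range;
 *		if (op->next)	// keep the tail part of the old mapping
 *			end = op->next->va.addr;
 *
 *		*unmap_addr = addr;
 *		*unmap_range = end - addr;
 *	}
 */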

/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: another &drm_gpuva_op to use as temporary storage
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_safe(), and hence is safe against removal of
 * elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)

struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
			    u64 addr, u64 range,
			    struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
			       struct drm_gem_object *obj);

void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
			struct drm_gpuva_ops *ops);
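
/*
 * Example (illustrative sketch): pre-computing the split/merge steps for a
 * map request and walking them. How each op is applied (foo_apply_op(),
 * switching on op->op) is driver specific and hypothetical; the ops list must
 * always be released with drm_gpuva_ops_free().
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		ret = foo_apply_op(foo, op);
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 *	return ret;
 */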

static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	drm_gpuva_init(va, op->va.addr, op->va.range,
		       op->gem.obj, op->gem.offset);
}

/**
 * struct drm_gpuvm_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuvm_ops {
	/**
	 * @op_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuvm_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to split up an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be split
	 * up. This is the case when either a newly requested mapping overlaps
	 * or is enclosed by an existent mapping or a partial unmap of an
	 * existent mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to unmap an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be
	 * unmapped. This is the case when either a newly requested mapping
	 * encloses an existent mapping or an unmap of an existent mapping is
	 * requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};

int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     u64 addr, u64 range,
		     struct drm_gem_object *obj, u64 offset);

int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
		       u64 addr, u64 range);

void drm_gpuva_map(struct drm_gpuvm *gpuvm,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
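
/*
 * Example (illustrative sketch): driving the split/merge steps through the
 * &drm_gpuvm_ops callbacks instead of a pre-built ops list. The foo_* names,
 * the priv cookie and the pre-allocation strategy are hypothetical;
 * drm_gpuva_map(), drm_gpuva_remap() and drm_gpuva_unmap() update the
 * &drm_gpuvm's bookkeeping from within the callbacks.
 *
 *	static int foo_sm_step_map(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct foo_bind_job *job = priv;
 *		struct drm_gpuva *va = job->prealloc_va;	// driver pre-allocated
 *
 *		drm_gpuva_map(job->gpuvm, va, &op->map);
 *		drm_gpuva_link(va);
 *		return foo_update_page_tables(job, op);
 *	}
 *
 *	static const struct drm_gpuvm_ops foo_gpuvm_ops = {
 *		.sm_step_map = foo_sm_step_map,
 *		.sm_step_remap = foo_sm_step_remap,
 *		.sm_step_unmap = foo_sm_step_unmap,
 *	};
 *
 *	// in the bind path:
 *	ret = drm_gpuvm_sm_map(job->gpuvm, job, addr, range, obj, offset);
 */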

#endif /* __DRM_GPUVM_H__ */