/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel-managed allocations used by the GPU.
 *
 */

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	amdgpu_bo_unref(&bo->parent);
	kvfree(bo);
}

static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	ubo = to_amdgpu_bo_user(bo);
	kfree(ubo->metadata);
	amdgpu_bo_destroy(tbo);
}

static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
	struct amdgpu_bo_vm *vmbo;

	bo = shadow_bo->parent;
	vmbo = to_amdgpu_bo_vm(bo);
	/* in case amdgpu_device_recover_vram() got a NULL bo->parent */
	if (!list_empty(&vmbo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&vmbo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	amdgpu_bo_destroy(tbo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy ||
	    bo->destroy == &amdgpu_bo_user_destroy ||
	    bo->destroy == &amdgpu_bo_vm_destroy)
		return true;

	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);

		if (adev->gmc.mem_partitions && mem_id >= 0) {
			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
			/*
			 * memory partition range lpfn is inclusive start + size - 1
			 * TTM place lpfn is exclusive start + size
			 */
			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
		} else {
			places[c].fpfn = 0;
			places[c].lpfn = 0;
		}
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_DOORBELL;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
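
/*
 * Example (illustrative sketch, not from a real caller): requesting VRAM
 * with a GTT fallback and revalidating the BO, mirroring what
 * amdgpu_bo_fault_reserve_notify() does further below. "abo" and "r" are
 * assumed to exist in the caller.
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */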

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
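
/*
 * Example (sketch, names hypothetical): allocating and later releasing a
 * page-sized, CPU-visible scratch buffer for kernel-internal use.
 *
 *	struct amdgpu_bo *scratch = NULL;
 *	u64 scratch_gpu;
 *	void *scratch_cpu;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &scratch,
 *				    &scratch_gpu, &scratch_cpu);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&scratch, &scratch_gpu, &scratch_cpu);
 */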

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size is smaller than the total size of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
	else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	else
		return true;

	if (!man) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
		return false;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
	if (size < man->size)
		return true;

	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system page allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;

	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (adev->gmc.mem_partitions)
		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
		bo->xcp_id = bp->xcp_id_plus1 - 1;
	else
		/* For GPUs without spatial partitioning */
		bo->xcp_id = 0;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (adev->ras_enabled)
		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}
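
/*
 * Example (sketch): filling in &amdgpu_bo_param by hand for a 1 MiB GTT
 * buffer. The values are illustrative assumptions, not a recommended
 * configuration; most callers use the create_kernel/create_user wrappers.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = 1 << 20;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */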

/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	bp->destroy = &amdgpu_bo_user_destroy;
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **vmbo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	/* bo_ptr_size will be determined by the caller and it depends on
	 * the number of amdgpu_vm_pt entries.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 *
 * @vmbo: BO that will be inserted into the shadow list
 *
 * Insert a BO to the shadow list.
 */
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);

	mutex_lock(&adev->shadow_list_lock);
	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
	vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
	vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
	mutex_unlock(&adev->shadow_list_lock);
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
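
/*
 * Example (sketch): CPU access to a reserved BO that allows it (i.e. not
 * created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS).
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memset(ptr, 0, amdgpu_bo_size(bo));
 *		amdgpu_bo_kunmap(bo);
 *	}
 */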

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned int fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
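
/*
 * Example (sketch): pinning a BO into VRAM, e.g. before scanout. The BO
 * must be reserved by the caller; names are illustrative.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r)
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */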

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static const char * const amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* set the default AGP aperture state */
	amdgpu_gmc_set_agp_default(adev, &adev->gmc);

	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
						   adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
						       adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	int idx;

	amdgpu_ttm_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
			arch_phys_wc_del(adev->gmc.vram_mtrr);
			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
		}
		drm_dev_exit(idx);
	}
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags of a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   u32 metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (metadata_size)
		*metadata_size = ubo->metadata_size;

	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}
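
/*
 * Example (sketch): round-tripping opaque metadata with the reservation
 * held. The BO must be a user BO (see amdgpu_bo_create_user()); the
 * payload here is purely illustrative.
 *
 *	u32 data = 0x1234;
 *	uint32_t out_size;
 *	uint64_t out_flags;
 *
 *	r = amdgpu_bo_set_metadata(bo, &data, sizeof(data), 0);
 *	...
 *	r = amdgpu_bo_get_metadata(bo, &data, sizeof(data),
 *				   &out_size, &out_flags);
 */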

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->resource->mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats)
{
	uint64_t size = amdgpu_bo_size(bo);
	unsigned int domain;

	/* Abort if the BO doesn't currently have a backing store */
	if (!bo->tbo.resource)
		return;

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		stats->vram += size;
		if (amdgpu_bo_in_cpu_visible_vram(bo))
			stats->visible_vram += size;
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		stats->gtt += size;
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		stats->cpu += size;
		break;
	}

	if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
		stats->requested_vram += size;
		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			stats->requested_visible_vram += size;

		if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
			stats->evicted_vram += size;
			if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
				stats->evicted_visible_vram += size;
		}
	} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
		stats->requested_gtt += size;
	}
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_release_notify(abo);

	/* We only remove the fence if the resv has been individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
			&& bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
		return;

	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
		return;

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return 0;

	if (amdgpu_bo_in_cpu_visible_vram(abo))
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	/* this should never happen */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    !amdgpu_bo_in_cpu_visible_vram(abo))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(fence, false);
		return;
	}

	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}
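
/*
 * Example (sketch): attaching a write fence after a GPU fill, roughly as
 * amdgpu_bo_release_notify() does above; the fence origin is an assumption.
 *
 *	struct dma_fence *fence;
 *
 *	r = amdgpu_fill_buffer(abo, 0, abo->tbo.base.resv, &fence, true);
 *	if (!r) {
 *		amdgpu_bo_fence(abo, fence, false);
 *		dma_fence_put(fence);
 *	}
 */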

/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extract the fences from the reservation object and waits for them to finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset;

	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_domain - get preferred domain
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for allocating the BO.
 */
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain)
{
	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	if (dma_resv_trylock(bo->tbo.base.resv)) {
		unsigned int domain;

		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			if (amdgpu_bo_in_cpu_visible_vram(bo))
				placement = "VRAM VISIBLE";
			else
				placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = "GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = "CPU";
			break;
		}
		dma_resv_unlock(bo->tbo.base.resv);
	} else {
		placement = "UNKNOWN";
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
		   id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
	else if (dma_buf)
		seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif