1 | /* |
2 | * Copyright 2008 Jerome Glisse. |
3 | * All Rights Reserved. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice (including the next |
13 | * paragraph) shall be included in all copies or substantial portions of the |
14 | * Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
22 | * DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: |
25 | * Jerome Glisse <glisse@freedesktop.org> |
26 | */ |
27 | #include <linux/pagemap.h> |
28 | #include <linux/sync_file.h> |
29 | #include <drm/drmP.h> |
30 | #include <drm/amdgpu_drm.h> |
31 | #include <drm/drm_syncobj.h> |
32 | #include "amdgpu.h" |
33 | #include "amdgpu_trace.h" |
34 | #include "amdgpu_gmc.h" |
35 | #include "amdgpu_gem.h" |
36 | |
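/* Take a reference to the user fence BO named in the fence chunk and remember
 * it as the parser's uf_entry. The BO must be exactly one page, must not be a
 * userptr BO, and the fence offset must leave room for the fence value.
 */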
37 | static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, |
38 | struct drm_amdgpu_cs_chunk_fence *data, |
39 | uint32_t *offset) |
40 | { |
41 | struct drm_gem_object *gobj; |
42 | struct amdgpu_bo *bo; |
43 | unsigned long size; |
44 | int r; |
45 | |
46 | gobj = drm_gem_object_lookup(p->filp, data->handle); |
47 | if (gobj == NULL) |
48 | return -EINVAL; |
49 | |
50 | bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); |
51 | p->uf_entry.priority = 0; |
52 | p->uf_entry.tv.bo = &bo->tbo; |
53 | /* One for TTM and one for the CS job */ |
54 | p->uf_entry.tv.num_shared = 2; |
55 | p->uf_entry.user_pages = NULL; |
56 | |
57 | drm_gem_object_put_unlocked(gobj); |
58 | |
59 | size = amdgpu_bo_size(bo); |
60 | if (size != PAGE_SIZE || (data->offset + 8) > size) { |
61 | r = -EINVAL; |
62 | goto error_unref; |
63 | } |
64 | |
65 | if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { |
66 | r = -EINVAL; |
67 | goto error_unref; |
68 | } |
69 | |
70 | *offset = data->offset; |
71 | |
72 | return 0; |
73 | |
74 | error_unref: |
75 | amdgpu_bo_unref(&bo); |
76 | return r; |
77 | } |
78 | |
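/* Create the parser's BO list from the handles supplied in an
 * AMDGPU_CHUNK_ID_BO_HANDLES chunk.
 */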
79 | static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, |
80 | struct drm_amdgpu_bo_list_in *data) |
81 | { |
82 | int r; |
83 | struct drm_amdgpu_bo_list_entry *info = NULL; |
84 | |
85 | r = amdgpu_bo_create_list_entry_array(data, &info); |
86 | if (r) |
87 | return r; |
88 | |
89 | r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number, |
90 | &p->bo_list); |
91 | if (r) |
92 | goto error_free; |
93 | |
94 | kvfree(info); |
95 | return 0; |
96 | |
97 | error_free: |
98 | if (info) |
99 | kvfree(info); |
100 | |
101 | return r; |
102 | } |
103 | |
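/* Copy the chunk array from userspace, validate the individual chunks and
 * allocate the job. Fence and BO handle chunks are handled here directly,
 * IB and dependency chunks are only copied and processed later.
 */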
104 | static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) |
105 | { |
106 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
107 | struct amdgpu_vm *vm = &fpriv->vm; |
108 | uint64_t *chunk_array_user; |
109 | uint64_t *chunk_array; |
110 | unsigned size, num_ibs = 0; |
111 | uint32_t uf_offset = 0; |
112 | int i; |
113 | int ret; |
114 | |
115 | if (cs->in.num_chunks == 0) |
116 | return 0; |
117 | |
118 | chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); |
119 | if (!chunk_array) |
120 | return -ENOMEM; |
121 | |
122 | p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); |
123 | if (!p->ctx) { |
124 | ret = -EINVAL; |
125 | goto free_chunk; |
126 | } |
127 | |
128 | mutex_lock(&p->ctx->lock); |
129 | |
130 | /* skip guilty context job */ |
131 | if (atomic_read(&p->ctx->guilty) == 1) { |
132 | ret = -ECANCELED; |
133 | goto free_chunk; |
134 | } |
135 | |
136 | /* get chunks */ |
137 | chunk_array_user = u64_to_user_ptr(cs->in.chunks); |
138 | if (copy_from_user(chunk_array, chunk_array_user, |
139 | sizeof(uint64_t)*cs->in.num_chunks)) { |
140 | ret = -EFAULT; |
141 | goto free_chunk; |
142 | } |
143 | |
144 | p->nchunks = cs->in.num_chunks; |
145 | p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), |
146 | GFP_KERNEL); |
147 | if (!p->chunks) { |
148 | ret = -ENOMEM; |
149 | goto free_chunk; |
150 | } |
151 | |
152 | for (i = 0; i < p->nchunks; i++) { |
153 | struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL; |
154 | struct drm_amdgpu_cs_chunk user_chunk; |
155 | uint32_t __user *cdata; |
156 | |
157 | chunk_ptr = u64_to_user_ptr(chunk_array[i]); |
158 | if (copy_from_user(&user_chunk, chunk_ptr, |
159 | sizeof(struct drm_amdgpu_cs_chunk))) { |
160 | ret = -EFAULT; |
161 | i--; |
162 | goto free_partial_kdata; |
163 | } |
164 | p->chunks[i].chunk_id = user_chunk.chunk_id; |
165 | p->chunks[i].length_dw = user_chunk.length_dw; |
166 | |
167 | size = p->chunks[i].length_dw; |
168 | cdata = u64_to_user_ptr(user_chunk.chunk_data); |
169 | |
170 | p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); |
171 | if (p->chunks[i].kdata == NULL) { |
172 | ret = -ENOMEM; |
173 | i--; |
174 | goto free_partial_kdata; |
175 | } |
176 | size *= sizeof(uint32_t); |
177 | if (copy_from_user(p->chunks[i].kdata, cdata, size)) { |
178 | ret = -EFAULT; |
179 | goto free_partial_kdata; |
180 | } |
181 | |
182 | switch (p->chunks[i].chunk_id) { |
183 | case AMDGPU_CHUNK_ID_IB: |
184 | ++num_ibs; |
185 | break; |
186 | |
187 | case AMDGPU_CHUNK_ID_FENCE: |
188 | size = sizeof(struct drm_amdgpu_cs_chunk_fence); |
189 | if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { |
190 | ret = -EINVAL; |
191 | goto free_partial_kdata; |
192 | } |
193 | |
194 | ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata, |
195 | &uf_offset); |
196 | if (ret) |
197 | goto free_partial_kdata; |
198 | |
199 | break; |
200 | |
201 | case AMDGPU_CHUNK_ID_BO_HANDLES: |
202 | size = sizeof(struct drm_amdgpu_bo_list_in); |
203 | if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { |
204 | ret = -EINVAL; |
205 | goto free_partial_kdata; |
206 | } |
207 | |
208 | ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata); |
209 | if (ret) |
210 | goto free_partial_kdata; |
211 | |
212 | break; |
213 | |
214 | case AMDGPU_CHUNK_ID_DEPENDENCIES: |
215 | case AMDGPU_CHUNK_ID_SYNCOBJ_IN: |
216 | case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: |
217 | case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: |
218 | break; |
219 | |
220 | default: |
221 | ret = -EINVAL; |
222 | goto free_partial_kdata; |
223 | } |
224 | } |
225 | |
226 | ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm); |
227 | if (ret) |
228 | goto free_all_kdata; |
229 | |
230 | if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) { |
231 | ret = -ECANCELED; |
232 | goto free_all_kdata; |
233 | } |
234 | |
235 | if (p->uf_entry.tv.bo) |
236 | p->job->uf_addr = uf_offset; |
237 | kfree(chunk_array); |
238 | |
239 | /* Use this opportunity to fill in task info for the vm */ |
240 | amdgpu_vm_set_task_info(vm); |
241 | |
242 | return 0; |
243 | |
244 | free_all_kdata: |
245 | i = p->nchunks - 1; |
246 | free_partial_kdata: |
247 | for (; i >= 0; i--) |
248 | kvfree(p->chunks[i].kdata); |
249 | kfree(p->chunks); |
250 | p->chunks = NULL; |
251 | p->nchunks = 0; |
252 | free_chunk: |
253 | kfree(chunk_array); |
254 | |
255 | return ret; |
256 | } |
257 | |
258 | /* Convert microseconds to bytes. */ |
259 | static u64 us_to_bytes(struct amdgpu_device *adev, s64 us) |
260 | { |
261 | if (us <= 0 || !adev->mm_stats.log2_max_MBps) |
262 | return 0; |
263 | |
264 | /* Since accum_us is incremented by a million per second, just |
265 | * multiply it by the number of MB/s to get the number of bytes. |
266 | */ |
267 | return us << adev->mm_stats.log2_max_MBps; |
268 | } |
269 | |
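/* Convert bytes to microseconds, the inverse of us_to_bytes(). */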
270 | static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes) |
271 | { |
272 | if (!adev->mm_stats.log2_max_MBps) |
273 | return 0; |
274 | |
275 | return bytes >> adev->mm_stats.log2_max_MBps; |
276 | } |
277 | |
278 | /* Returns how many bytes TTM can move right now. If no bytes can be moved, |
279 | * it returns 0. If it returns non-zero, it's OK to move at least one buffer, |
280 | * which means it can go over the threshold once. If that happens, the driver |
281 | * will be in debt and no other buffer migrations can be done until that debt |
282 | * is repaid. |
283 | * |
284 | * This approach allows moving a buffer of any size (it's important to allow |
285 | * that). |
286 | * |
287 | * The currency is simply time in microseconds and it increases as the clock |
288 | * ticks. The accumulated microseconds (us) are converted to bytes and |
289 | * returned. |
290 | */ |
291 | static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, |
292 | u64 *max_bytes, |
293 | u64 *max_vis_bytes) |
294 | { |
295 | s64 time_us, increment_us; |
296 | u64 free_vram, total_vram, used_vram; |
297 | |
298 | /* Allow a maximum of 200 accumulated ms. This is basically per-IB |
299 | * throttling. |
300 | * |
301 | * It means that in order to get full max MBps, at least 5 IBs per |
302 | * second must be submitted and not more than 200ms apart from each |
303 | * other. |
304 | */ |
305 | const s64 us_upper_bound = 200000; |
306 | |
307 | if (!adev->mm_stats.log2_max_MBps) { |
308 | *max_bytes = 0; |
309 | *max_vis_bytes = 0; |
310 | return; |
311 | } |
312 | |
313 | total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size); |
314 | used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); |
315 | free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; |
316 | |
317 | spin_lock(&adev->mm_stats.lock); |
318 | |
319 | /* Increase the amount of accumulated us. */ |
320 | time_us = ktime_to_us(ktime_get()); |
321 | increment_us = time_us - adev->mm_stats.last_update_us; |
322 | adev->mm_stats.last_update_us = time_us; |
323 | adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us, |
324 | us_upper_bound); |
325 | |
326 | /* This prevents the short period of low performance when the VRAM |
327 | * usage is low and the driver is in debt or doesn't have enough |
328 | * accumulated us to fill VRAM quickly. |
329 | * |
330 | * The situation can occur in these cases: |
331 | * - a lot of VRAM is freed by userspace |
332 | * - the presence of a big buffer causes a lot of evictions |
333 | * (solution: split buffers into smaller ones) |
334 | * |
335 | * If 128 MB or 1/8th of VRAM is free, start filling it now by setting |
336 | * accum_us to a positive number. |
337 | */ |
338 | if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) { |
339 | s64 min_us; |
340 | |
341 | 		/* Be more aggressive on dGPUs. Try to fill a portion of free |
342 | * VRAM now. |
343 | */ |
344 | if (!(adev->flags & AMD_IS_APU)) |
345 | min_us = bytes_to_us(adev, free_vram / 4); |
346 | else |
347 | min_us = 0; /* Reset accum_us on APUs. */ |
348 | |
349 | adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us); |
350 | } |
351 | |
352 | /* This is set to 0 if the driver is in debt to disallow (optional) |
353 | * buffer moves. |
354 | */ |
355 | *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); |
356 | |
357 | /* Do the same for visible VRAM if half of it is free */ |
358 | if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) { |
359 | u64 total_vis_vram = adev->gmc.visible_vram_size; |
360 | u64 used_vis_vram = |
361 | amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); |
362 | |
363 | if (used_vis_vram < total_vis_vram) { |
364 | u64 free_vis_vram = total_vis_vram - used_vis_vram; |
365 | adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis + |
366 | increment_us, us_upper_bound); |
367 | |
368 | if (free_vis_vram >= total_vis_vram / 2) |
369 | adev->mm_stats.accum_us_vis = |
370 | max(bytes_to_us(adev, free_vis_vram / 2), |
371 | adev->mm_stats.accum_us_vis); |
372 | } |
373 | |
374 | *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis); |
375 | } else { |
376 | *max_vis_bytes = 0; |
377 | } |
378 | |
379 | spin_unlock(&adev->mm_stats.lock); |
380 | } |
381 | |
382 | /* Report how many bytes have really been moved for the last command |
383 | * submission. This can result in a debt that can stop buffer migrations |
384 | * temporarily. |
385 | */ |
386 | void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, |
387 | u64 num_vis_bytes) |
388 | { |
389 | spin_lock(&adev->mm_stats.lock); |
390 | adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); |
391 | adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes); |
392 | spin_unlock(&adev->mm_stats.lock); |
393 | } |
394 | |
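/* Validate a single BO for the CS. Use the preferred domains while we still
 * have budget for buffer moves, otherwise fall back to the allowed domains.
 * Moved bytes are accumulated in the parser for later accounting.
 */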
395 | static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, |
396 | struct amdgpu_bo *bo) |
397 | { |
398 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
399 | struct ttm_operation_ctx ctx = { |
400 | .interruptible = true, |
401 | .no_wait_gpu = false, |
402 | .resv = bo->tbo.resv, |
403 | .flags = 0 |
404 | }; |
405 | uint32_t domain; |
406 | int r; |
407 | |
408 | if (bo->pin_count) |
409 | return 0; |
410 | |
411 | /* Don't move this buffer if we have depleted our allowance |
412 | * to move it. Don't move anything if the threshold is zero. |
413 | */ |
414 | if (p->bytes_moved < p->bytes_moved_threshold) { |
415 | if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && |
416 | (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { |
417 | /* And don't move a CPU_ACCESS_REQUIRED BO to limited |
418 | * visible VRAM if we've depleted our allowance to do |
419 | * that. |
420 | */ |
421 | if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) |
422 | domain = bo->preferred_domains; |
423 | else |
424 | domain = bo->allowed_domains; |
425 | } else { |
426 | domain = bo->preferred_domains; |
427 | } |
428 | } else { |
429 | domain = bo->allowed_domains; |
430 | } |
431 | |
432 | retry: |
433 | amdgpu_bo_placement_from_domain(bo, domain); |
434 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
435 | |
436 | p->bytes_moved += ctx.bytes_moved; |
437 | if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && |
438 | amdgpu_bo_in_cpu_visible_vram(bo)) |
439 | p->bytes_moved_vis += ctx.bytes_moved; |
440 | |
441 | if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { |
442 | domain = bo->allowed_domains; |
443 | goto retry; |
444 | } |
445 | |
446 | return r; |
447 | } |
448 | |
449 | /* Last resort, try to evict something from the current working set */ |
450 | static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, |
451 | struct amdgpu_bo *validated) |
452 | { |
453 | uint32_t domain = validated->allowed_domains; |
454 | struct ttm_operation_ctx ctx = { true, false }; |
455 | int r; |
456 | |
457 | if (!p->evictable) |
458 | return false; |
459 | |
460 | 	for (; &p->evictable->tv.head != &p->validated; |
461 | p->evictable = list_prev_entry(p->evictable, tv.head)) { |
462 | |
463 | struct amdgpu_bo_list_entry *candidate = p->evictable; |
464 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo); |
465 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
466 | bool update_bytes_moved_vis; |
467 | uint32_t other; |
468 | |
469 | /* If we reached our current BO we can forget it */ |
470 | if (bo == validated) |
471 | break; |
472 | |
473 | /* We can't move pinned BOs here */ |
474 | if (bo->pin_count) |
475 | continue; |
476 | |
477 | other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); |
478 | |
479 | /* Check if this BO is in one of the domains we need space for */ |
480 | if (!(other & domain)) |
481 | continue; |
482 | |
483 | /* Check if we can move this BO somewhere else */ |
484 | other = bo->allowed_domains & ~domain; |
485 | if (!other) |
486 | continue; |
487 | |
488 | 		/* Good, we can try to move this BO somewhere else */ |
489 | update_bytes_moved_vis = |
490 | !amdgpu_gmc_vram_full_visible(&adev->gmc) && |
491 | amdgpu_bo_in_cpu_visible_vram(bo); |
492 | amdgpu_bo_placement_from_domain(bo, other); |
493 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
494 | p->bytes_moved += ctx.bytes_moved; |
495 | if (update_bytes_moved_vis) |
496 | p->bytes_moved_vis += ctx.bytes_moved; |
497 | |
498 | if (unlikely(r)) |
499 | break; |
500 | |
501 | p->evictable = list_prev_entry(p->evictable, tv.head); |
502 | list_move(&candidate->tv.head, &p->validated); |
503 | |
504 | return true; |
505 | } |
506 | |
507 | return false; |
508 | } |
509 | |
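/* Validation callback: validate a BO and its shadow, evicting BOs from the
 * current working set when we run out of memory.
 */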
510 | static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) |
511 | { |
512 | struct amdgpu_cs_parser *p = param; |
513 | int r; |
514 | |
515 | do { |
516 | r = amdgpu_cs_bo_validate(p, bo); |
517 | } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo)); |
518 | if (r) |
519 | return r; |
520 | |
521 | if (bo->shadow) |
522 | r = amdgpu_cs_bo_validate(p, bo->shadow); |
523 | |
524 | return r; |
525 | } |
526 | |
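/* Validate all BOs on the given list and bind freshly acquired user pages
 * for userptr BOs.
 */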
527 | static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, |
528 | struct list_head *validated) |
529 | { |
530 | struct ttm_operation_ctx ctx = { true, false }; |
531 | struct amdgpu_bo_list_entry *lobj; |
532 | int r; |
533 | |
534 | list_for_each_entry(lobj, validated, tv.head) { |
535 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo); |
536 | bool binding_userptr = false; |
537 | struct mm_struct *usermm; |
538 | |
539 | usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); |
540 | if (usermm && usermm != current->mm) |
541 | return -EPERM; |
542 | |
543 | /* Check if we have user pages and nobody bound the BO already */ |
544 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) && |
545 | lobj->user_pages) { |
546 | amdgpu_bo_placement_from_domain(bo, |
547 | AMDGPU_GEM_DOMAIN_CPU); |
548 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
549 | if (r) |
550 | return r; |
551 | amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, |
552 | lobj->user_pages); |
553 | binding_userptr = true; |
554 | } |
555 | |
556 | if (p->evictable == lobj) |
557 | p->evictable = NULL; |
558 | |
559 | r = amdgpu_cs_validate(p, bo); |
560 | if (r) |
561 | return r; |
562 | |
563 | if (binding_userptr) { |
564 | kvfree(lobj->user_pages); |
565 | lobj->user_pages = NULL; |
566 | } |
567 | } |
568 | return 0; |
569 | } |
570 | |
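/* Gather all BOs used by the CS (BO list, VM page directory and user fence),
 * reserve them, make sure userptr BOs have their pages and validate
 * everything against the current move thresholds.
 */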
571 | static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, |
572 | union drm_amdgpu_cs *cs) |
573 | { |
574 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
575 | struct amdgpu_vm *vm = &fpriv->vm; |
576 | struct amdgpu_bo_list_entry *e; |
577 | struct list_head duplicates; |
578 | struct amdgpu_bo *gds; |
579 | struct amdgpu_bo *gws; |
580 | struct amdgpu_bo *oa; |
581 | unsigned tries = 10; |
582 | int r; |
583 | |
584 | INIT_LIST_HEAD(&p->validated); |
585 | |
586 | /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */ |
587 | if (cs->in.bo_list_handle) { |
588 | if (p->bo_list) |
589 | return -EINVAL; |
590 | |
591 | r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle, |
592 | &p->bo_list); |
593 | if (r) |
594 | return r; |
595 | } else if (!p->bo_list) { |
596 | 		/* Create an empty bo_list when no handle is provided */ |
597 | r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0, |
598 | &p->bo_list); |
599 | if (r) |
600 | return r; |
601 | } |
602 | |
603 | /* One for TTM and one for the CS job */ |
604 | amdgpu_bo_list_for_each_entry(e, p->bo_list) |
605 | e->tv.num_shared = 2; |
606 | |
607 | amdgpu_bo_list_get_list(p->bo_list, &p->validated); |
608 | if (p->bo_list->first_userptr != p->bo_list->num_entries) |
609 | p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX); |
610 | |
611 | INIT_LIST_HEAD(&duplicates); |
612 | amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); |
613 | |
614 | if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent) |
615 | list_add(&p->uf_entry.tv.head, &p->validated); |
616 | |
617 | while (1) { |
618 | struct list_head need_pages; |
619 | |
620 | r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, |
621 | &duplicates); |
622 | if (unlikely(r != 0)) { |
623 | if (r != -ERESTARTSYS) |
624 | 				DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); |
625 | goto error_free_pages; |
626 | } |
627 | |
628 | INIT_LIST_HEAD(&need_pages); |
629 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
630 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); |
631 | |
632 | if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm, |
633 | &e->user_invalidated) && e->user_pages) { |
634 | |
635 | /* We acquired a page array, but somebody |
636 | * invalidated it. Free it and try again |
637 | */ |
638 | release_pages(e->user_pages, |
639 | bo->tbo.ttm->num_pages); |
640 | kvfree(e->user_pages); |
641 | e->user_pages = NULL; |
642 | } |
643 | |
644 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) && |
645 | !e->user_pages) { |
646 | list_del(&e->tv.head); |
647 | list_add(&e->tv.head, &need_pages); |
648 | |
649 | amdgpu_bo_unreserve(bo); |
650 | } |
651 | } |
652 | |
653 | if (list_empty(&need_pages)) |
654 | break; |
655 | |
656 | /* Unreserve everything again. */ |
657 | ttm_eu_backoff_reservation(&p->ticket, &p->validated); |
658 | |
659 | /* We tried too many times, just abort */ |
660 | if (!--tries) { |
661 | r = -EDEADLK; |
662 | 			DRM_ERROR("deadlock in %s\n", __func__); |
663 | goto error_free_pages; |
664 | } |
665 | |
666 | /* Fill the page arrays for all userptrs. */ |
667 | list_for_each_entry(e, &need_pages, tv.head) { |
668 | struct ttm_tt *ttm = e->tv.bo->ttm; |
669 | |
670 | e->user_pages = kvmalloc_array(ttm->num_pages, |
671 | sizeof(struct page*), |
672 | GFP_KERNEL | __GFP_ZERO); |
673 | if (!e->user_pages) { |
674 | r = -ENOMEM; |
675 | 				DRM_ERROR("calloc failure in %s\n", __func__); |
676 | goto error_free_pages; |
677 | } |
678 | |
679 | r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages); |
680 | if (r) { |
681 | 				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n"); |
682 | kvfree(e->user_pages); |
683 | e->user_pages = NULL; |
684 | goto error_free_pages; |
685 | } |
686 | } |
687 | |
688 | /* And try again. */ |
689 | list_splice(&need_pages, &p->validated); |
690 | } |
691 | |
692 | amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, |
693 | &p->bytes_moved_vis_threshold); |
694 | p->bytes_moved = 0; |
695 | p->bytes_moved_vis = 0; |
696 | p->evictable = list_last_entry(&p->validated, |
697 | struct amdgpu_bo_list_entry, |
698 | tv.head); |
699 | |
700 | r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, |
701 | amdgpu_cs_validate, p); |
702 | if (r) { |
703 | 		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); |
704 | goto error_validate; |
705 | } |
706 | |
707 | r = amdgpu_cs_list_validate(p, &duplicates); |
708 | if (r) { |
709 | 		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n"); |
710 | goto error_validate; |
711 | } |
712 | |
713 | r = amdgpu_cs_list_validate(p, &p->validated); |
714 | if (r) { |
715 | 		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n"); |
716 | goto error_validate; |
717 | } |
718 | |
719 | amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, |
720 | p->bytes_moved_vis); |
721 | |
722 | gds = p->bo_list->gds_obj; |
723 | gws = p->bo_list->gws_obj; |
724 | oa = p->bo_list->oa_obj; |
725 | |
726 | amdgpu_bo_list_for_each_entry(e, p->bo_list) { |
727 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); |
728 | |
729 | /* Make sure we use the exclusive slot for shared BOs */ |
730 | if (bo->prime_shared_count) |
731 | e->tv.num_shared = 0; |
732 | e->bo_va = amdgpu_vm_bo_find(vm, bo); |
733 | } |
734 | |
735 | if (gds) { |
736 | p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; |
737 | p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; |
738 | } |
739 | if (gws) { |
740 | p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT; |
741 | p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT; |
742 | } |
743 | if (oa) { |
744 | p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT; |
745 | p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT; |
746 | } |
747 | |
748 | if (!r && p->uf_entry.tv.bo) { |
749 | struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo); |
750 | |
751 | r = amdgpu_ttm_alloc_gart(&uf->tbo); |
752 | p->job->uf_addr += amdgpu_bo_gpu_offset(uf); |
753 | } |
754 | |
755 | error_validate: |
756 | if (r) |
757 | ttm_eu_backoff_reservation(&p->ticket, &p->validated); |
758 | |
759 | error_free_pages: |
760 | |
761 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
762 | if (!e->user_pages) |
763 | continue; |
764 | |
765 | release_pages(e->user_pages, e->tv.bo->ttm->num_pages); |
766 | kvfree(e->user_pages); |
767 | } |
768 | |
769 | return r; |
770 | } |
771 | |
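/* Make the job wait for the fences attached to all validated BOs, honoring
 * per-BO explicit sync.
 */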
772 | static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) |
773 | { |
774 | struct amdgpu_bo_list_entry *e; |
775 | int r; |
776 | |
777 | list_for_each_entry(e, &p->validated, tv.head) { |
778 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); |
779 | struct reservation_object *resv = bo->tbo.resv; |
780 | |
781 | r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, |
782 | amdgpu_bo_explicit_sync(bo)); |
783 | |
784 | if (r) |
785 | return r; |
786 | } |
787 | return 0; |
788 | } |
789 | |
790 | /** |
791 |  * amdgpu_cs_parser_fini() - clean parser states |
792 |  * @parser: parser structure holding parsing context. |
793 |  * @error: error number |
794 |  * @backoff: indicator to backoff the reservation |
795 |  * |
796 |  * If error is set, unvalidate the buffers, otherwise free parser resources. |
797 | **/ |
798 | static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, |
799 | bool backoff) |
800 | { |
801 | unsigned i; |
802 | |
803 | if (error && backoff) |
804 | ttm_eu_backoff_reservation(&parser->ticket, |
805 | &parser->validated); |
806 | |
807 | for (i = 0; i < parser->num_post_dep_syncobjs; i++) |
808 | drm_syncobj_put(parser->post_dep_syncobjs[i]); |
809 | kfree(parser->post_dep_syncobjs); |
810 | |
811 | dma_fence_put(parser->fence); |
812 | |
813 | if (parser->ctx) { |
814 | mutex_unlock(&parser->ctx->lock); |
815 | amdgpu_ctx_put(parser->ctx); |
816 | } |
817 | if (parser->bo_list) |
818 | amdgpu_bo_list_put(parser->bo_list); |
819 | |
820 | for (i = 0; i < parser->nchunks; i++) |
821 | kvfree(parser->chunks[i].kdata); |
822 | kfree(parser->chunks); |
823 | if (parser->job) |
824 | amdgpu_job_free(parser->job); |
825 | if (parser->uf_entry.tv.bo) { |
826 | struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo); |
827 | |
828 | amdgpu_bo_unref(&uf); |
829 | } |
830 | } |
831 | |
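/* Handle the VM related work of the CS: parse/patch IBs for UVD/VCE VM
 * emulation, update the page tables of all BOs used by the job and sync the
 * job to the resulting page table updates.
 */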
832 | static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) |
833 | { |
834 | struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); |
835 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
836 | struct amdgpu_device *adev = p->adev; |
837 | struct amdgpu_vm *vm = &fpriv->vm; |
838 | struct amdgpu_bo_list_entry *e; |
839 | struct amdgpu_bo_va *bo_va; |
840 | struct amdgpu_bo *bo; |
841 | int r; |
842 | |
843 | /* Only for UVD/VCE VM emulation */ |
844 | if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) { |
845 | unsigned i, j; |
846 | |
847 | for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { |
848 | struct drm_amdgpu_cs_chunk_ib *chunk_ib; |
849 | struct amdgpu_bo_va_mapping *m; |
850 | struct amdgpu_bo *aobj = NULL; |
851 | struct amdgpu_cs_chunk *chunk; |
852 | uint64_t offset, va_start; |
853 | struct amdgpu_ib *ib; |
854 | uint8_t *kptr; |
855 | |
856 | chunk = &p->chunks[i]; |
857 | ib = &p->job->ibs[j]; |
858 | chunk_ib = chunk->kdata; |
859 | |
860 | if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) |
861 | continue; |
862 | |
863 | va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK; |
864 | r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); |
865 | if (r) { |
866 | 				DRM_ERROR("IB va_start is invalid\n"); |
867 | return r; |
868 | } |
869 | |
870 | if ((va_start + chunk_ib->ib_bytes) > |
871 | (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) { |
872 | 				DRM_ERROR("IB va_start+ib_bytes is invalid\n"); |
873 | return -EINVAL; |
874 | } |
875 | |
876 | /* the IB should be reserved at this point */ |
877 | r = amdgpu_bo_kmap(aobj, (void **)&kptr); |
878 | if (r) { |
879 | return r; |
880 | } |
881 | |
882 | offset = m->start * AMDGPU_GPU_PAGE_SIZE; |
883 | kptr += va_start - offset; |
884 | |
885 | if (ring->funcs->parse_cs) { |
886 | memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); |
887 | amdgpu_bo_kunmap(aobj); |
888 | |
889 | r = amdgpu_ring_parse_cs(ring, p, j); |
890 | if (r) |
891 | return r; |
892 | } else { |
893 | ib->ptr = (uint32_t *)kptr; |
894 | r = amdgpu_ring_patch_cs_in_place(ring, p, j); |
895 | amdgpu_bo_kunmap(aobj); |
896 | if (r) |
897 | return r; |
898 | } |
899 | |
900 | j++; |
901 | } |
902 | } |
903 | |
904 | if (!p->job->vm) |
905 | return amdgpu_cs_sync_rings(p); |
906 | |
907 | |
908 | r = amdgpu_vm_clear_freed(adev, vm, NULL); |
909 | if (r) |
910 | return r; |
911 | |
912 | r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); |
913 | if (r) |
914 | return r; |
915 | |
916 | r = amdgpu_sync_fence(adev, &p->job->sync, |
917 | fpriv->prt_va->last_pt_update, false); |
918 | if (r) |
919 | return r; |
920 | |
921 | if (amdgpu_sriov_vf(adev)) { |
922 | struct dma_fence *f; |
923 | |
924 | bo_va = fpriv->csa_va; |
925 | BUG_ON(!bo_va); |
926 | r = amdgpu_vm_bo_update(adev, bo_va, false); |
927 | if (r) |
928 | return r; |
929 | |
930 | f = bo_va->last_pt_update; |
931 | r = amdgpu_sync_fence(adev, &p->job->sync, f, false); |
932 | if (r) |
933 | return r; |
934 | } |
935 | |
936 | amdgpu_bo_list_for_each_entry(e, p->bo_list) { |
937 | struct dma_fence *f; |
938 | |
939 | /* ignore duplicates */ |
940 | bo = ttm_to_amdgpu_bo(e->tv.bo); |
941 | if (!bo) |
942 | continue; |
943 | |
944 | bo_va = e->bo_va; |
945 | if (bo_va == NULL) |
946 | continue; |
947 | |
948 | r = amdgpu_vm_bo_update(adev, bo_va, false); |
949 | if (r) |
950 | return r; |
951 | |
952 | f = bo_va->last_pt_update; |
953 | r = amdgpu_sync_fence(adev, &p->job->sync, f, false); |
954 | if (r) |
955 | return r; |
956 | } |
957 | |
958 | r = amdgpu_vm_handle_moved(adev, vm); |
959 | if (r) |
960 | return r; |
961 | |
962 | r = amdgpu_vm_update_directories(adev, vm); |
963 | if (r) |
964 | return r; |
965 | |
966 | r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false); |
967 | if (r) |
968 | return r; |
969 | |
970 | p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); |
971 | |
972 | if (amdgpu_vm_debug) { |
973 | /* Invalidate all BOs to test for userspace bugs */ |
974 | amdgpu_bo_list_for_each_entry(e, p->bo_list) { |
975 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); |
976 | |
977 | /* ignore duplicates */ |
978 | if (!bo) |
979 | continue; |
980 | |
981 | amdgpu_vm_bo_invalidate(adev, bo, false); |
982 | } |
983 | } |
984 | |
985 | return amdgpu_cs_sync_rings(p); |
986 | } |
987 | |
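/* Walk the IB chunks, pick the scheduler entity for the submission and fill
 * in the job's IBs. All IBs of a CS must target the same entity.
 */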
988 | static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, |
989 | struct amdgpu_cs_parser *parser) |
990 | { |
991 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; |
992 | struct amdgpu_vm *vm = &fpriv->vm; |
993 | int r, ce_preempt = 0, de_preempt = 0; |
994 | struct amdgpu_ring *ring; |
995 | int i, j; |
996 | |
997 | for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) { |
998 | struct amdgpu_cs_chunk *chunk; |
999 | struct amdgpu_ib *ib; |
1000 | struct drm_amdgpu_cs_chunk_ib *chunk_ib; |
1001 | struct drm_sched_entity *entity; |
1002 | |
1003 | chunk = &parser->chunks[i]; |
1004 | ib = &parser->job->ibs[j]; |
1005 | chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata; |
1006 | |
1007 | if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) |
1008 | continue; |
1009 | |
1010 | if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) { |
1011 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) { |
1012 | if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) |
1013 | ce_preempt++; |
1014 | else |
1015 | de_preempt++; |
1016 | } |
1017 | |
1018 | 			/* Each GFX submission allows at most 1 preemptible IB for CE and DE each */ |
1019 | if (ce_preempt > 1 || de_preempt > 1) |
1020 | return -EINVAL; |
1021 | } |
1022 | |
1023 | r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type, |
1024 | chunk_ib->ip_instance, chunk_ib->ring, |
1025 | &entity); |
1026 | if (r) |
1027 | return r; |
1028 | |
1029 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) |
1030 | parser->job->preamble_status |= |
1031 | AMDGPU_PREAMBLE_IB_PRESENT; |
1032 | |
1033 | if (parser->entity && parser->entity != entity) |
1034 | return -EINVAL; |
1035 | |
1036 | parser->entity = entity; |
1037 | |
1038 | ring = to_amdgpu_ring(entity->rq->sched); |
1039 | r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ? |
1040 | chunk_ib->ib_bytes : 0, ib); |
1041 | if (r) { |
1042 | 			DRM_ERROR("Failed to get ib !\n"); |
1043 | return r; |
1044 | } |
1045 | |
1046 | ib->gpu_addr = chunk_ib->va_start; |
1047 | ib->length_dw = chunk_ib->ib_bytes / 4; |
1048 | ib->flags = chunk_ib->flags; |
1049 | |
1050 | j++; |
1051 | } |
1052 | |
1053 | /* UVD & VCE fw doesn't support user fences */ |
1054 | ring = to_amdgpu_ring(parser->entity->rq->sched); |
1055 | if (parser->job->uf_addr && ( |
1056 | ring->funcs->type == AMDGPU_RING_TYPE_UVD || |
1057 | ring->funcs->type == AMDGPU_RING_TYPE_VCE)) |
1058 | return -EINVAL; |
1059 | |
1060 | return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity); |
1061 | } |
1062 | |
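/* Add the fences from a dependencies chunk to the job's sync object. For
 * scheduled dependencies wait for the scheduled fence instead of the
 * finished one.
 */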
1063 | static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, |
1064 | struct amdgpu_cs_chunk *chunk) |
1065 | { |
1066 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
1067 | unsigned num_deps; |
1068 | int i, r; |
1069 | struct drm_amdgpu_cs_chunk_dep *deps; |
1070 | |
1071 | deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; |
1072 | num_deps = chunk->length_dw * 4 / |
1073 | sizeof(struct drm_amdgpu_cs_chunk_dep); |
1074 | |
1075 | for (i = 0; i < num_deps; ++i) { |
1076 | struct amdgpu_ctx *ctx; |
1077 | struct drm_sched_entity *entity; |
1078 | struct dma_fence *fence; |
1079 | |
1080 | ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); |
1081 | if (ctx == NULL) |
1082 | return -EINVAL; |
1083 | |
1084 | r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type, |
1085 | deps[i].ip_instance, |
1086 | deps[i].ring, &entity); |
1087 | if (r) { |
1088 | amdgpu_ctx_put(ctx); |
1089 | return r; |
1090 | } |
1091 | |
1092 | fence = amdgpu_ctx_get_fence(ctx, entity, |
1093 | deps[i].handle); |
1094 | |
1095 | if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { |
1096 | struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); |
1097 | struct dma_fence *old = fence; |
1098 | |
1099 | fence = dma_fence_get(&s_fence->scheduled); |
1100 | dma_fence_put(old); |
1101 | } |
1102 | |
1103 | if (IS_ERR(fence)) { |
1104 | r = PTR_ERR(fence); |
1105 | amdgpu_ctx_put(ctx); |
1106 | return r; |
1107 | } else if (fence) { |
1108 | r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, |
1109 | true); |
1110 | dma_fence_put(fence); |
1111 | amdgpu_ctx_put(ctx); |
1112 | if (r) |
1113 | return r; |
1114 | } |
1115 | } |
1116 | return 0; |
1117 | } |
1118 | |
1119 | static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, |
1120 | uint32_t handle) |
1121 | { |
1122 | int r; |
1123 | struct dma_fence *fence; |
1124 | r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence); |
1125 | if (r) |
1126 | return r; |
1127 | |
1128 | r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); |
1129 | dma_fence_put(fence); |
1130 | |
1131 | return r; |
1132 | } |
1133 | |
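/* Add the fences of all syncobjs named in an AMDGPU_CHUNK_ID_SYNCOBJ_IN
 * chunk as dependencies of the job.
 */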
1134 | static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, |
1135 | struct amdgpu_cs_chunk *chunk) |
1136 | { |
1137 | unsigned num_deps; |
1138 | int i, r; |
1139 | struct drm_amdgpu_cs_chunk_sem *deps; |
1140 | |
1141 | deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; |
1142 | num_deps = chunk->length_dw * 4 / |
1143 | sizeof(struct drm_amdgpu_cs_chunk_sem); |
1144 | |
1145 | for (i = 0; i < num_deps; ++i) { |
1146 | r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle); |
1147 | if (r) |
1148 | return r; |
1149 | } |
1150 | return 0; |
1151 | } |
1152 | |
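/* Remember the syncobjs named in an AMDGPU_CHUNK_ID_SYNCOBJ_OUT chunk; their
 * fences are replaced with the CS fence once the job is submitted.
 */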
1153 | static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, |
1154 | struct amdgpu_cs_chunk *chunk) |
1155 | { |
1156 | unsigned num_deps; |
1157 | int i; |
1158 | struct drm_amdgpu_cs_chunk_sem *deps; |
1159 | deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; |
1160 | num_deps = chunk->length_dw * 4 / |
1161 | sizeof(struct drm_amdgpu_cs_chunk_sem); |
1162 | |
1163 | p->post_dep_syncobjs = kmalloc_array(num_deps, |
1164 | sizeof(struct drm_syncobj *), |
1165 | GFP_KERNEL); |
1166 | p->num_post_dep_syncobjs = 0; |
1167 | |
1168 | if (!p->post_dep_syncobjs) |
1169 | return -ENOMEM; |
1170 | |
1171 | for (i = 0; i < num_deps; ++i) { |
1172 | p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); |
1173 | if (!p->post_dep_syncobjs[i]) |
1174 | return -EINVAL; |
1175 | p->num_post_dep_syncobjs++; |
1176 | } |
1177 | return 0; |
1178 | } |
1179 | |
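/* Process all dependency related chunks of the CS. */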
1180 | static int amdgpu_cs_dependencies(struct amdgpu_device *adev, |
1181 | struct amdgpu_cs_parser *p) |
1182 | { |
1183 | int i, r; |
1184 | |
1185 | for (i = 0; i < p->nchunks; ++i) { |
1186 | struct amdgpu_cs_chunk *chunk; |
1187 | |
1188 | chunk = &p->chunks[i]; |
1189 | |
1190 | if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES || |
1191 | chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { |
1192 | r = amdgpu_cs_process_fence_dep(p, chunk); |
1193 | if (r) |
1194 | return r; |
1195 | } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) { |
1196 | r = amdgpu_cs_process_syncobj_in_dep(p, chunk); |
1197 | if (r) |
1198 | return r; |
1199 | } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) { |
1200 | r = amdgpu_cs_process_syncobj_out_dep(p, chunk); |
1201 | if (r) |
1202 | return r; |
1203 | } |
1204 | } |
1205 | |
1206 | return 0; |
1207 | } |
1208 | |
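/* Install the CS fence into all out-syncobjs. */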
1209 | static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) |
1210 | { |
1211 | int i; |
1212 | |
1213 | for (i = 0; i < p->num_post_dep_syncobjs; ++i) |
1214 | drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); |
1215 | } |
1216 | |
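/* Turn the parsed CS into a scheduler job: attach it to the entity, publish
 * the fence and sequence number and push the job to the scheduler. Userptr
 * BOs are re-checked under the MMU notifier lock before the job is pushed.
 */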
1217 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, |
1218 | union drm_amdgpu_cs *cs) |
1219 | { |
1220 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
1221 | struct drm_sched_entity *entity = p->entity; |
1222 | enum drm_sched_priority priority; |
1223 | struct amdgpu_ring *ring; |
1224 | struct amdgpu_bo_list_entry *e; |
1225 | struct amdgpu_job *job; |
1226 | uint64_t seq; |
1227 | |
1228 | int r; |
1229 | |
1230 | job = p->job; |
1231 | p->job = NULL; |
1232 | |
1233 | r = drm_sched_job_init(&job->base, entity, p->filp); |
1234 | if (r) |
1235 | goto error_unlock; |
1236 | |
1237 | /* No memory allocation is allowed while holding the mn lock */ |
1238 | amdgpu_mn_lock(p->mn); |
1239 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
1240 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); |
1241 | |
1242 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { |
1243 | r = -ERESTARTSYS; |
1244 | goto error_abort; |
1245 | } |
1246 | } |
1247 | |
1248 | job->owner = p->filp; |
1249 | p->fence = dma_fence_get(&job->base.s_fence->finished); |
1250 | |
1251 | amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); |
1252 | amdgpu_cs_post_dependencies(p); |
1253 | |
1254 | if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && |
1255 | !p->ctx->preamble_presented) { |
1256 | job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; |
1257 | p->ctx->preamble_presented = true; |
1258 | } |
1259 | |
1260 | cs->out.handle = seq; |
1261 | job->uf_sequence = seq; |
1262 | |
1263 | amdgpu_job_free_resources(job); |
1264 | |
1265 | trace_amdgpu_cs_ioctl(job); |
1266 | amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket); |
1267 | priority = job->base.s_priority; |
1268 | drm_sched_entity_push_job(&job->base, entity); |
1269 | |
1270 | ring = to_amdgpu_ring(entity->rq->sched); |
1271 | amdgpu_ring_priority_get(ring, priority); |
1272 | |
1273 | amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); |
1274 | |
1275 | ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); |
1276 | amdgpu_mn_unlock(p->mn); |
1277 | |
1278 | return 0; |
1279 | |
1280 | error_abort: |
1281 | drm_sched_job_cleanup(&job->base); |
1282 | amdgpu_mn_unlock(p->mn); |
1283 | |
1284 | error_unlock: |
1285 | amdgpu_job_free(job); |
1286 | return r; |
1287 | } |
1288 | |
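/**
 * amdgpu_cs_ioctl - process a command submission from userspace
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Top level entry point of the CS ioctl: parse the chunks, fill in the IBs,
 * resolve dependencies, gather and validate the buffers, handle the VM
 * updates and submit the resulting job.
 */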
1289 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
1290 | { |
1291 | struct amdgpu_device *adev = dev->dev_private; |
1292 | union drm_amdgpu_cs *cs = data; |
1293 | struct amdgpu_cs_parser parser = {}; |
1294 | bool reserved_buffers = false; |
1295 | int i, r; |
1296 | |
1297 | if (!adev->accel_working) |
1298 | return -EBUSY; |
1299 | |
1300 | parser.adev = adev; |
1301 | parser.filp = filp; |
1302 | |
1303 | r = amdgpu_cs_parser_init(&parser, data); |
1304 | if (r) { |
1305 | 		DRM_ERROR("Failed to initialize parser %d!\n", r); |
1306 | goto out; |
1307 | } |
1308 | |
1309 | r = amdgpu_cs_ib_fill(adev, &parser); |
1310 | if (r) |
1311 | goto out; |
1312 | |
1313 | r = amdgpu_cs_dependencies(adev, &parser); |
1314 | if (r) { |
1315 | 		DRM_ERROR("Failed in the dependencies handling %d!\n", r); |
1316 | goto out; |
1317 | } |
1318 | |
1319 | r = amdgpu_cs_parser_bos(&parser, data); |
1320 | if (r) { |
1321 | if (r == -ENOMEM) |
1322 | 			DRM_ERROR("Not enough memory for command submission!\n"); |
1323 | else if (r != -ERESTARTSYS) |
1324 | 			DRM_ERROR("Failed to process the buffer list %d!\n", r); |
1325 | goto out; |
1326 | } |
1327 | |
1328 | reserved_buffers = true; |
1329 | |
1330 | for (i = 0; i < parser.job->num_ibs; i++) |
1331 | trace_amdgpu_cs(&parser, i); |
1332 | |
1333 | r = amdgpu_cs_vm_handling(&parser); |
1334 | if (r) |
1335 | goto out; |
1336 | |
1337 | r = amdgpu_cs_submit(&parser, cs); |
1338 | |
1339 | out: |
1340 | amdgpu_cs_parser_fini(&parser, r, reserved_buffers); |
1341 | return r; |
1342 | } |
1343 | |
1344 | /** |
1345 | * amdgpu_cs_wait_ioctl - wait for a command submission to finish |
1346 | * |
1347 | * @dev: drm device |
1348 | * @data: data from userspace |
1349 | * @filp: file private |
1350 | * |
1351 | * Wait for the command submission identified by handle to finish. |
1352 | */ |
1353 | int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, |
1354 | struct drm_file *filp) |
1355 | { |
1356 | union drm_amdgpu_wait_cs *wait = data; |
1357 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); |
1358 | struct drm_sched_entity *entity; |
1359 | struct amdgpu_ctx *ctx; |
1360 | struct dma_fence *fence; |
1361 | long r; |
1362 | |
1363 | ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); |
1364 | if (ctx == NULL) |
1365 | return -EINVAL; |
1366 | |
1367 | r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance, |
1368 | wait->in.ring, &entity); |
1369 | if (r) { |
1370 | amdgpu_ctx_put(ctx); |
1371 | return r; |
1372 | } |
1373 | |
1374 | fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle); |
1375 | if (IS_ERR(fence)) |
1376 | r = PTR_ERR(fence); |
1377 | else if (fence) { |
1378 | r = dma_fence_wait_timeout(fence, true, timeout); |
1379 | if (r > 0 && fence->error) |
1380 | r = fence->error; |
1381 | dma_fence_put(fence); |
1382 | } else |
1383 | r = 1; |
1384 | |
1385 | amdgpu_ctx_put(ctx); |
1386 | if (r < 0) |
1387 | return r; |
1388 | |
1389 | memset(wait, 0, sizeof(*wait)); |
1390 | wait->out.status = (r == 0); |
1391 | |
1392 | return 0; |
1393 | } |
1394 | |
1395 | /** |
1396 | * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence |
1397 | * |
1398 | * @adev: amdgpu device |
1399 | * @filp: file private |
1400 | * @user: drm_amdgpu_fence copied from user space |
1401 | */ |
1402 | static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, |
1403 | struct drm_file *filp, |
1404 | struct drm_amdgpu_fence *user) |
1405 | { |
1406 | struct drm_sched_entity *entity; |
1407 | struct amdgpu_ctx *ctx; |
1408 | struct dma_fence *fence; |
1409 | int r; |
1410 | |
1411 | ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id); |
1412 | if (ctx == NULL) |
1413 | return ERR_PTR(-EINVAL); |
1414 | |
1415 | r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance, |
1416 | user->ring, &entity); |
1417 | if (r) { |
1418 | amdgpu_ctx_put(ctx); |
1419 | return ERR_PTR(r); |
1420 | } |
1421 | |
1422 | fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no); |
1423 | amdgpu_ctx_put(ctx); |
1424 | |
1425 | return fence; |
1426 | } |
1427 | |
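/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a fence into a handle
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Convert the fence identified by a drm_amdgpu_fence into a syncobj handle,
 * a syncobj fd or a sync_file fd, depending on what userspace asked for.
 */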
1428 | int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, |
1429 | struct drm_file *filp) |
1430 | { |
1431 | struct amdgpu_device *adev = dev->dev_private; |
1432 | union drm_amdgpu_fence_to_handle *info = data; |
1433 | struct dma_fence *fence; |
1434 | struct drm_syncobj *syncobj; |
1435 | struct sync_file *sync_file; |
1436 | int fd, r; |
1437 | |
1438 | fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence); |
1439 | if (IS_ERR(fence)) |
1440 | return PTR_ERR(fence); |
1441 | |
1442 | if (!fence) |
1443 | fence = dma_fence_get_stub(); |
1444 | |
1445 | switch (info->in.what) { |
1446 | case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ: |
1447 | r = drm_syncobj_create(&syncobj, 0, fence); |
1448 | dma_fence_put(fence); |
1449 | if (r) |
1450 | return r; |
1451 | r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle); |
1452 | drm_syncobj_put(syncobj); |
1453 | return r; |
1454 | |
1455 | case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD: |
1456 | r = drm_syncobj_create(&syncobj, 0, fence); |
1457 | dma_fence_put(fence); |
1458 | if (r) |
1459 | return r; |
1460 | r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle); |
1461 | drm_syncobj_put(syncobj); |
1462 | return r; |
1463 | |
1464 | case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD: |
1465 | fd = get_unused_fd_flags(O_CLOEXEC); |
1466 | if (fd < 0) { |
1467 | dma_fence_put(fence); |
1468 | return fd; |
1469 | } |
1470 | |
1471 | sync_file = sync_file_create(fence); |
1472 | dma_fence_put(fence); |
1473 | if (!sync_file) { |
1474 | put_unused_fd(fd); |
1475 | return -ENOMEM; |
1476 | } |
1477 | |
1478 | fd_install(fd, sync_file->file); |
1479 | info->out.handle = fd; |
1480 | return 0; |
1481 | |
1482 | default: |
1483 | return -EINVAL; |
1484 | } |
1485 | } |
1486 | |
1487 | /** |
1488 |  * amdgpu_cs_wait_all_fences - wait on all fences to signal |
1489 | * |
1490 | * @adev: amdgpu device |
1491 | * @filp: file private |
1492 | * @wait: wait parameters |
1493 | * @fences: array of drm_amdgpu_fence |
1494 | */ |
1495 | static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, |
1496 | struct drm_file *filp, |
1497 | union drm_amdgpu_wait_fences *wait, |
1498 | struct drm_amdgpu_fence *fences) |
1499 | { |
1500 | uint32_t fence_count = wait->in.fence_count; |
1501 | unsigned int i; |
1502 | long r = 1; |
1503 | |
1504 | for (i = 0; i < fence_count; i++) { |
1505 | struct dma_fence *fence; |
1506 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); |
1507 | |
1508 | fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); |
1509 | if (IS_ERR(fence)) |
1510 | return PTR_ERR(fence); |
1511 | else if (!fence) |
1512 | continue; |
1513 | |
1514 | r = dma_fence_wait_timeout(fence, true, timeout); |
1515 | dma_fence_put(fence); |
1516 | if (r < 0) |
1517 | return r; |
1518 | |
1519 | if (r == 0) |
1520 | break; |
1521 | |
1522 | if (fence->error) |
1523 | return fence->error; |
1524 | } |
1525 | |
1526 | memset(wait, 0, sizeof(*wait)); |
1527 | wait->out.status = (r > 0); |
1528 | |
1529 | return 0; |
1530 | } |
1531 | |
1532 | /** |
1533 | * amdgpu_cs_wait_any_fence - wait on any fence to signal |
1534 | * |
1535 | * @adev: amdgpu device |
1536 | * @filp: file private |
1537 | * @wait: wait parameters |
1538 | * @fences: array of drm_amdgpu_fence |
1539 | */ |
1540 | static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev, |
1541 | struct drm_file *filp, |
1542 | union drm_amdgpu_wait_fences *wait, |
1543 | struct drm_amdgpu_fence *fences) |
1544 | { |
1545 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); |
1546 | uint32_t fence_count = wait->in.fence_count; |
1547 | uint32_t first = ~0; |
1548 | struct dma_fence **array; |
1549 | unsigned int i; |
1550 | long r; |
1551 | |
1552 | /* Prepare the fence array */ |
1553 | array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL); |
1554 | |
1555 | if (array == NULL) |
1556 | return -ENOMEM; |
1557 | |
1558 | for (i = 0; i < fence_count; i++) { |
1559 | struct dma_fence *fence; |
1560 | |
1561 | fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); |
1562 | if (IS_ERR(fence)) { |
1563 | r = PTR_ERR(fence); |
1564 | goto err_free_fence_array; |
1565 | } else if (fence) { |
1566 | array[i] = fence; |
1567 | } else { /* NULL, the fence has been already signaled */ |
1568 | r = 1; |
1569 | first = i; |
1570 | goto out; |
1571 | } |
1572 | } |
1573 | |
1574 | r = dma_fence_wait_any_timeout(array, fence_count, true, timeout, |
1575 | &first); |
1576 | if (r < 0) |
1577 | goto err_free_fence_array; |
1578 | |
1579 | out: |
1580 | memset(wait, 0, sizeof(*wait)); |
1581 | wait->out.status = (r > 0); |
1582 | wait->out.first_signaled = first; |
1583 | |
1584 | if (first < fence_count && array[first]) |
1585 | r = array[first]->error; |
1586 | else |
1587 | r = 0; |
1588 | |
1589 | err_free_fence_array: |
1590 | for (i = 0; i < fence_count; i++) |
1591 | dma_fence_put(array[i]); |
1592 | kfree(array); |
1593 | |
1594 | return r; |
1595 | } |
1596 | |
1597 | /** |
1598 | * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish |
1599 | * |
1600 | * @dev: drm device |
1601 | * @data: data from userspace |
1602 | * @filp: file private |
1603 | */ |
1604 | int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, |
1605 | struct drm_file *filp) |
1606 | { |
1607 | struct amdgpu_device *adev = dev->dev_private; |
1608 | union drm_amdgpu_wait_fences *wait = data; |
1609 | uint32_t fence_count = wait->in.fence_count; |
1610 | struct drm_amdgpu_fence *fences_user; |
1611 | struct drm_amdgpu_fence *fences; |
1612 | int r; |
1613 | |
1614 | /* Get the fences from userspace */ |
1615 | fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), |
1616 | GFP_KERNEL); |
1617 | if (fences == NULL) |
1618 | return -ENOMEM; |
1619 | |
1620 | fences_user = u64_to_user_ptr(wait->in.fences); |
1621 | if (copy_from_user(fences, fences_user, |
1622 | sizeof(struct drm_amdgpu_fence) * fence_count)) { |
1623 | r = -EFAULT; |
1624 | goto err_free_fences; |
1625 | } |
1626 | |
1627 | if (wait->in.wait_all) |
1628 | r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); |
1629 | else |
1630 | r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); |
1631 | |
1632 | err_free_fences: |
1633 | kfree(fences); |
1634 | |
1635 | return r; |
1636 | } |
1637 | |
1638 | /** |
1639 |  * amdgpu_cs_find_mapping - find the BO and VA mapping for a VM address |
1640 |  * |
1641 |  * @parser: command submission parser context |
1642 |  * @addr: VM address |
1643 |  * @bo: resulting BO of the mapping found |
1644 |  * @map: resulting VA mapping found |
1645 |  * |
1646 |  * Search the buffer objects in the command submission context for a certain |
1647 |  * virtual memory address. Returns 0 on success, negative error code otherwise. |
1648 | */ |
1649 | int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, |
1650 | uint64_t addr, struct amdgpu_bo **bo, |
1651 | struct amdgpu_bo_va_mapping **map) |
1652 | { |
1653 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; |
1654 | struct ttm_operation_ctx ctx = { false, false }; |
1655 | struct amdgpu_vm *vm = &fpriv->vm; |
1656 | struct amdgpu_bo_va_mapping *mapping; |
1657 | int r; |
1658 | |
1659 | addr /= AMDGPU_GPU_PAGE_SIZE; |
1660 | |
1661 | mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); |
1662 | if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo) |
1663 | return -EINVAL; |
1664 | |
1665 | *bo = mapping->bo_va->base.bo; |
1666 | *map = mapping; |
1667 | |
1668 | /* Double check that the BO is reserved by this CS */ |
1669 | if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket) |
1670 | return -EINVAL; |
1671 | |
1672 | if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { |
1673 | (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; |
1674 | amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); |
1675 | r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); |
1676 | if (r) |
1677 | return r; |
1678 | } |
1679 | |
1680 | return amdgpu_ttm_alloc_gart(&(*bo)->tbo); |
1681 | } |
1682 | |