/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

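	/* Try the requested width first and fall back to narrower ranges on
	 * -ENOSPC. Each attempt only allocates from the upper half of the
	 * current power-of-two range, so smaller PASID values stay available
	 * for consumers that need a narrower PASID.
	 */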
	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

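	/* No fences left to wait for, the PASID can be freed immediately. */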
	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete; as a last resort,
	 * block until all the fences have completed.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID; if none is idle, set @fence to a fence that must
 * signal before retrying. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;

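	/* A previous grab attempt may already be waiting for VMIDs to become
	 * idle; reuse that wait fence instead of building a new one.
	 */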
	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

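		/* Combine the busy fences into an array that signals as soon
		 * as any of them does (signal_on_any), i.e. as soon as one
		 * VMID becomes idle again.
		 */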
		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		*fence = dma_fence_get(&array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return 0;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = id_mgr->reserved;
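	/* Reuse the reserved VMID directly only if it still belongs to this
	 * VM, matches the job and has an up-to-date flush; otherwise wait
	 * for its current users and schedule a new flush.
	 */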
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the same time */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context from being starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			*fence = dma_fence_get(tmp);
			return 0;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub] ||
	    (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

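	/* Record the job's GDS/GWS/OA setup and page directory in the VMID so
	 * that later submissions can detect whether a switch or flush is
	 * needed.
	 */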
	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

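/**
 * amdgpu_vmid_alloc_reserved - reserve a VMID for exclusive use
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub to reserve a VMID on
 *
 * Take one VMID out of the normal LRU rotation and keep it as the hub's
 * reserved VMID; the reservation is reference counted.
 */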
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);

	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}

	mutex_unlock(&id_mgr->lock);
	return 0;
}

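/**
 * amdgpu_vmid_free_reserved - drop a VMID reservation
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub the reservation was made on
 *
 * Drop one reference on the hub's reserved VMID and return it to the normal
 * round robin rotation once the last reference is gone.
 */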
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (!--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
		id_mgr->reserved = NULL;
	}

	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on the next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VMID manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}

	/* alloc a default reserved vmid to enforce isolation */
	if (enforce_isolation)
		amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Clean up the VMID manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}