1 | /* |
2 | * Copyright 2014 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | */ |
22 | |
23 | #include <linux/mutex.h> |
24 | #include <linux/log2.h> |
25 | #include <linux/sched.h> |
26 | #include <linux/sched/mm.h> |
27 | #include <linux/sched/task.h> |
28 | #include <linux/slab.h> |
29 | #include <linux/amd-iommu.h> |
30 | #include <linux/notifier.h> |
31 | #include <linux/compat.h> |
32 | #include <linux/mman.h> |
33 | #include <linux/file.h> |
34 | #include "amdgpu_amdkfd.h" |
35 | |
36 | struct mm_struct; |
37 | |
38 | #include "kfd_priv.h" |
39 | #include "kfd_device_queue_manager.h" |
40 | #include "kfd_dbgmgr.h" |
41 | #include "kfd_iommu.h" |
42 | |
43 | /* |
 * Hash table of struct kfd_process (field kfd_processes),
 * unique/indexed by mm_struct*.
46 | */ |
47 | DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); |
48 | static DEFINE_MUTEX(kfd_processes_mutex); |
49 | |
50 | DEFINE_SRCU(kfd_processes_srcu); |
51 | |
52 | /* For process termination handling */ |
53 | static struct workqueue_struct *kfd_process_wq; |
54 | |
55 | /* Ordered, single-threaded workqueue for restoring evicted |
56 | * processes. Restoring multiple processes concurrently under memory |
57 | * pressure can lead to processes blocking each other from validating |
58 | * their BOs and result in a live-lock situation where processes |
59 | * remain evicted indefinitely. |
60 | */ |
61 | static struct workqueue_struct *kfd_restore_wq; |
62 | |
63 | static struct kfd_process *find_process(const struct task_struct *thread); |
64 | static void kfd_process_ref_release(struct kref *ref); |
65 | static struct kfd_process *create_process(const struct task_struct *thread, |
66 | struct file *filep); |
67 | |
68 | static void evict_process_worker(struct work_struct *work); |
69 | static void restore_process_worker(struct work_struct *work); |
70 | |
71 | |
72 | int kfd_process_create_wq(void) |
73 | { |
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
78 | |
79 | if (!kfd_process_wq || !kfd_restore_wq) { |
80 | kfd_process_destroy_wq(); |
81 | return -ENOMEM; |
82 | } |
83 | |
84 | return 0; |
85 | } |
86 | |
87 | void kfd_process_destroy_wq(void) |
88 | { |
89 | if (kfd_process_wq) { |
90 | destroy_workqueue(kfd_process_wq); |
91 | kfd_process_wq = NULL; |
92 | } |
93 | if (kfd_restore_wq) { |
94 | destroy_workqueue(kfd_restore_wq); |
95 | kfd_restore_wq = NULL; |
96 | } |
97 | } |
98 | |
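/* kfd_process_free_gpuvm - Unmap a BO from the GPU and release its memory.
 * Counterpart of kfd_process_alloc_gpuvm.
 */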
99 | static void kfd_process_free_gpuvm(struct kgd_mem *mem, |
100 | struct kfd_process_device *pdd) |
101 | { |
102 | struct kfd_dev *dev = pdd->dev; |
103 | |
104 | amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm); |
105 | amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem); |
106 | } |
107 | |
/* kfd_process_alloc_gpuvm - Allocate GPU memory and map it into the
 * process's GPU VM. This function should only be called right after
 * the process is created, while kfd_processes_mutex is still held
 * to avoid concurrency. Because of that exclusivity, we do not need
 * to take p->mutex.
 */
114 | static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, |
115 | uint64_t gpu_va, uint32_t size, |
116 | uint32_t flags, void **kptr) |
117 | { |
118 | struct kfd_dev *kdev = pdd->dev; |
119 | struct kgd_mem *mem = NULL; |
120 | int handle; |
121 | int err; |
122 | |
123 | err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size, |
124 | pdd->vm, &mem, NULL, flags); |
125 | if (err) |
126 | goto err_alloc_mem; |
127 | |
128 | err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm); |
129 | if (err) |
130 | goto err_map_mem; |
131 | |
132 | err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true); |
133 | if (err) { |
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
135 | goto sync_memory_failed; |
136 | } |
137 | |
138 | /* Create an obj handle so kfd_process_device_remove_obj_handle |
139 | * will take care of the bo removal when the process finishes. |
140 | * We do not need to take p->mutex, because the process is just |
141 | * created and the ioctls have not had the chance to run. |
142 | */ |
143 | handle = kfd_process_device_create_obj_handle(pdd, mem); |
144 | |
145 | if (handle < 0) { |
146 | err = handle; |
147 | goto free_gpuvm; |
148 | } |
149 | |
	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}
158 | |
159 | return err; |
160 | |
161 | free_obj_handle: |
162 | kfd_process_device_remove_obj_handle(pdd, handle); |
163 | free_gpuvm: |
164 | sync_memory_failed: |
165 | kfd_process_free_gpuvm(mem, pdd); |
166 | return err; |
167 | |
err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
	if (kptr)
		*kptr = NULL;
	return err;
173 | } |
174 | |
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The reserved memory is used by KFD to submit
 * IBs to amdgpu from kernel space. If the reservation succeeds,
 * ib_kaddr will hold the CPU/kernel address. Check ib_kaddr before
 * accessing the memory.
 */
181 | static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) |
182 | { |
183 | struct qcm_process_device *qpd = &pdd->qpd; |
184 | uint32_t flags = ALLOC_MEM_FLAGS_GTT | |
185 | ALLOC_MEM_FLAGS_NO_SUBSTITUTE | |
186 | ALLOC_MEM_FLAGS_WRITABLE | |
187 | ALLOC_MEM_FLAGS_EXECUTABLE; |
188 | void *kaddr; |
189 | int ret; |
190 | |
191 | if (qpd->ib_kaddr || !qpd->ib_base) |
192 | return 0; |
193 | |
194 | /* ib_base is only set for dGPU */ |
195 | ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, |
196 | &kaddr); |
197 | if (ret) |
198 | return ret; |
199 | |
200 | qpd->ib_kaddr = kaddr; |
201 | |
202 | return 0; |
203 | } |
204 | |
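/* kfd_create_process - Find the kfd_process for the current task, or
 * create one if this is the first open of /dev/kfd by this process.
 * Returns an ERR_PTR on failure.
 */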
205 | struct kfd_process *kfd_create_process(struct file *filep) |
206 | { |
207 | struct kfd_process *process; |
208 | struct task_struct *thread = current; |
209 | |
210 | if (!thread->mm) |
211 | return ERR_PTR(-EINVAL); |
212 | |
213 | /* Only the pthreads threading model is supported. */ |
214 | if (thread->group_leader->mm != thread->mm) |
215 | return ERR_PTR(-EINVAL); |
216 | |
	/*
	 * Take the kfd_processes_mutex before starting process creation, so
	 * that two threads of the same process cannot create two kfd_process
	 * structures.
	 */
222 | mutex_lock(&kfd_processes_mutex); |
223 | |
224 | /* A prior open of /dev/kfd could have already created the process. */ |
225 | process = find_process(thread); |
226 | if (process) |
		pr_debug("Process already found\n");
228 | else |
229 | process = create_process(thread, filep); |
230 | |
231 | mutex_unlock(&kfd_processes_mutex); |
232 | |
233 | return process; |
234 | } |
235 | |
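/* kfd_get_process - Look up the kfd_process for a task. Unlike
 * kfd_create_process, this never creates a new process.
 */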
236 | struct kfd_process *kfd_get_process(const struct task_struct *thread) |
237 | { |
238 | struct kfd_process *process; |
239 | |
240 | if (!thread->mm) |
241 | return ERR_PTR(-EINVAL); |
242 | |
243 | /* Only the pthreads threading model is supported. */ |
244 | if (thread->group_leader->mm != thread->mm) |
245 | return ERR_PTR(-EINVAL); |
246 | |
247 | process = find_process(thread); |
248 | if (!process) |
249 | return ERR_PTR(-EINVAL); |
250 | |
251 | return process; |
252 | } |
253 | |
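/* Look up a kfd_process by its mm_struct. Called with the
 * kfd_processes_srcu read lock held.
 */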
254 | static struct kfd_process *find_process_by_mm(const struct mm_struct *mm) |
255 | { |
256 | struct kfd_process *process; |
257 | |
258 | hash_for_each_possible_rcu(kfd_processes_table, process, |
259 | kfd_processes, (uintptr_t)mm) |
260 | if (process->mm == mm) |
261 | return process; |
262 | |
263 | return NULL; |
264 | } |
265 | |
266 | static struct kfd_process *find_process(const struct task_struct *thread) |
267 | { |
268 | struct kfd_process *p; |
269 | int idx; |
270 | |
271 | idx = srcu_read_lock(&kfd_processes_srcu); |
272 | p = find_process_by_mm(thread->mm); |
273 | srcu_read_unlock(&kfd_processes_srcu, idx); |
274 | |
275 | return p; |
276 | } |
277 | |
278 | void kfd_unref_process(struct kfd_process *p) |
279 | { |
280 | kref_put(&p->ref, kfd_process_ref_release); |
281 | } |
282 | |
283 | static void kfd_process_device_free_bos(struct kfd_process_device *pdd) |
284 | { |
285 | struct kfd_process *p = pdd->process; |
286 | void *mem; |
287 | int id; |
288 | |
	/*
	 * Remove all handles from the idr and release the corresponding
	 * local memory objects
	 */
293 | idr_for_each_entry(&pdd->alloc_idr, mem, id) { |
294 | struct kfd_process_device *peer_pdd; |
295 | |
296 | list_for_each_entry(peer_pdd, &p->per_device_data, |
297 | per_device_list) { |
298 | if (!peer_pdd->vm) |
299 | continue; |
300 | amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( |
301 | peer_pdd->dev->kgd, mem, peer_pdd->vm); |
302 | } |
303 | |
304 | amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem); |
305 | kfd_process_device_remove_obj_handle(pdd, id); |
306 | } |
307 | } |
308 | |
309 | static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p) |
310 | { |
311 | struct kfd_process_device *pdd; |
312 | |
313 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) |
314 | kfd_process_device_free_bos(pdd); |
315 | } |
316 | |
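/* kfd_process_destroy_pdds - Release all per-device data of a process:
 * destroy or release each device VM, free CWSR pages allocated for APUs,
 * and free the doorbell bitmap and object handle idr.
 */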
317 | static void kfd_process_destroy_pdds(struct kfd_process *p) |
318 | { |
319 | struct kfd_process_device *pdd, *temp; |
320 | |
321 | list_for_each_entry_safe(pdd, temp, &p->per_device_data, |
322 | per_device_list) { |
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
			 pdd->dev->id, p->pasid);
325 | |
326 | if (pdd->drm_file) { |
327 | amdgpu_amdkfd_gpuvm_release_process_vm( |
328 | pdd->dev->kgd, pdd->vm); |
329 | fput(pdd->drm_file); |
		} else if (pdd->vm)
332 | amdgpu_amdkfd_gpuvm_destroy_process_vm( |
333 | pdd->dev->kgd, pdd->vm); |
334 | |
335 | list_del(&pdd->per_device_list); |
336 | |
337 | if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) |
338 | free_pages((unsigned long)pdd->qpd.cwsr_kaddr, |
339 | get_order(KFD_CWSR_TBA_TMA_SIZE)); |
340 | |
341 | kfree(pdd->qpd.doorbell_bitmap); |
342 | idr_destroy(&pdd->alloc_idr); |
343 | |
344 | kfree(pdd); |
345 | } |
346 | } |
347 | |
348 | /* No process locking is needed in this function, because the process |
349 | * is not findable any more. We must assume that no other thread is |
350 | * using it any more, otherwise we couldn't safely free the process |
351 | * structure in the end. |
352 | */ |
353 | static void kfd_process_wq_release(struct work_struct *work) |
354 | { |
355 | struct kfd_process *p = container_of(work, struct kfd_process, |
356 | release_work); |
357 | |
358 | kfd_iommu_unbind_process(p); |
359 | |
360 | kfd_process_free_outstanding_kfd_bos(p); |
361 | |
362 | kfd_process_destroy_pdds(p); |
363 | dma_fence_put(p->ef); |
364 | |
365 | kfd_event_free_process(p); |
366 | |
367 | kfd_pasid_free(p->pasid); |
368 | kfd_free_process_doorbells(p); |
369 | |
370 | mutex_destroy(&p->mutex); |
371 | |
372 | put_task_struct(p->lead_thread); |
373 | |
374 | kfree(p); |
375 | } |
376 | |
377 | static void kfd_process_ref_release(struct kref *ref) |
378 | { |
379 | struct kfd_process *p = container_of(ref, struct kfd_process, ref); |
380 | |
381 | INIT_WORK(&p->release_work, kfd_process_wq_release); |
382 | queue_work(kfd_process_wq, &p->release_work); |
383 | } |
384 | |
385 | static void kfd_process_destroy_delayed(struct rcu_head *rcu) |
386 | { |
387 | struct kfd_process *p = container_of(rcu, struct kfd_process, rcu); |
388 | |
389 | kfd_unref_process(p); |
390 | } |
391 | |
392 | static void kfd_process_notifier_release(struct mmu_notifier *mn, |
393 | struct mm_struct *mm) |
394 | { |
395 | struct kfd_process *p; |
396 | struct kfd_process_device *pdd = NULL; |
397 | |
	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier SRCU read lock is held
	 */
402 | p = container_of(mn, struct kfd_process, mmu_notifier); |
403 | if (WARN_ON(p->mm != mm)) |
404 | return; |
405 | |
406 | mutex_lock(&kfd_processes_mutex); |
407 | hash_del_rcu(&p->kfd_processes); |
408 | mutex_unlock(&kfd_processes_mutex); |
409 | synchronize_srcu(&kfd_processes_srcu); |
410 | |
411 | cancel_delayed_work_sync(&p->eviction_work); |
412 | cancel_delayed_work_sync(&p->restore_work); |
413 | |
414 | mutex_lock(&p->mutex); |
415 | |
	/* Iterate over all process device data structures. If a pdd is in
	 * debug mode, force unregistration first; only then can the queues
	 * be destroyed.
	 */
420 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) { |
421 | struct kfd_dev *dev = pdd->dev; |
422 | |
423 | mutex_lock(kfd_get_dbgmgr_mutex()); |
424 | if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) { |
425 | if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) { |
426 | kfd_dbgmgr_destroy(dev->dbgmgr); |
427 | dev->dbgmgr = NULL; |
428 | } |
429 | } |
430 | mutex_unlock(kfd_get_dbgmgr_mutex()); |
431 | } |
432 | |
433 | kfd_process_dequeue_from_all_devices(p); |
434 | pqm_uninit(&p->pqm); |
435 | |
436 | /* Indicate to other users that MM is no longer valid */ |
437 | p->mm = NULL; |
438 | |
439 | mutex_unlock(&p->mutex); |
440 | |
441 | mmu_notifier_unregister_no_release(&p->mmu_notifier, mm); |
442 | mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed); |
443 | } |
444 | |
445 | static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = { |
446 | .release = kfd_process_notifier_release, |
447 | }; |
448 | |
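/* kfd_process_init_cwsr_apu - For each CWSR-enabled device that has no
 * dGPU cwsr_base, map the CWSR trap handler area into the process
 * address space and copy the trap handler ISA into it.
 */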
449 | static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) |
450 | { |
451 | unsigned long offset; |
452 | struct kfd_process_device *pdd; |
453 | |
454 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) { |
455 | struct kfd_dev *dev = pdd->dev; |
456 | struct qcm_process_device *qpd = &pdd->qpd; |
457 | |
458 | if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) |
459 | continue; |
460 | |
461 | offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id)) |
462 | << PAGE_SHIFT; |
463 | qpd->tba_addr = (int64_t)vm_mmap(filep, 0, |
464 | KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC, |
465 | MAP_SHARED, offset); |
466 | |
467 | if (IS_ERR_VALUE(qpd->tba_addr)) { |
468 | int err = qpd->tba_addr; |
469 | |
			pr_err("Failure to set tba address. error %d.\n", err);
471 | qpd->tba_addr = 0; |
472 | qpd->cwsr_kaddr = NULL; |
473 | return err; |
474 | } |
475 | |
476 | memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); |
477 | |
478 | qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; |
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
481 | } |
482 | |
483 | return 0; |
484 | } |
485 | |
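/* kfd_process_device_init_cwsr_dgpu - On dGPUs, allocate GTT memory at
 * cwsr_base for the CWSR trap handler, map it into the kernel and copy
 * the trap handler ISA into it.
 */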
486 | static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) |
487 | { |
488 | struct kfd_dev *dev = pdd->dev; |
489 | struct qcm_process_device *qpd = &pdd->qpd; |
490 | uint32_t flags = ALLOC_MEM_FLAGS_GTT | |
491 | ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE; |
492 | void *kaddr; |
493 | int ret; |
494 | |
495 | if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) |
496 | return 0; |
497 | |
498 | /* cwsr_base is only set for dGPU */ |
499 | ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, |
500 | KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr); |
501 | if (ret) |
502 | return ret; |
503 | |
504 | qpd->cwsr_kaddr = kaddr; |
505 | qpd->tba_addr = qpd->cwsr_base; |
506 | |
507 | memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); |
508 | |
509 | qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; |
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
512 | |
513 | return 0; |
514 | } |
515 | |
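/* create_process - Allocate and initialize a new kfd_process for the
 * given task. Called with kfd_processes_mutex held.
 */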
516 | static struct kfd_process *create_process(const struct task_struct *thread, |
517 | struct file *filep) |
518 | { |
519 | struct kfd_process *process; |
520 | int err = -ENOMEM; |
521 | |
522 | process = kzalloc(sizeof(*process), GFP_KERNEL); |
523 | |
524 | if (!process) |
525 | goto err_alloc_process; |
526 | |
527 | process->pasid = kfd_pasid_alloc(); |
528 | if (process->pasid == 0) |
529 | goto err_alloc_pasid; |
530 | |
531 | if (kfd_alloc_process_doorbells(process) < 0) |
532 | goto err_alloc_doorbells; |
533 | |
534 | kref_init(&process->ref); |
535 | |
536 | mutex_init(&process->mutex); |
537 | |
538 | process->mm = thread->mm; |
539 | |
540 | /* register notifier */ |
541 | process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops; |
542 | err = mmu_notifier_register(&process->mmu_notifier, process->mm); |
543 | if (err) |
544 | goto err_mmu_notifier; |
545 | |
546 | hash_add_rcu(kfd_processes_table, &process->kfd_processes, |
547 | (uintptr_t)process->mm); |
548 | |
549 | process->lead_thread = thread->group_leader; |
550 | get_task_struct(process->lead_thread); |
551 | |
552 | INIT_LIST_HEAD(&process->per_device_data); |
553 | |
554 | kfd_event_init_process(process); |
555 | |
556 | err = pqm_init(&process->pqm, process); |
557 | if (err != 0) |
558 | goto err_process_pqm_init; |
559 | |
	/* Init process apertures */
561 | process->is_32bit_user_mode = in_compat_syscall(); |
562 | err = kfd_init_apertures(process); |
563 | if (err != 0) |
564 | goto err_init_apertures; |
565 | |
566 | INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker); |
567 | INIT_DELAYED_WORK(&process->restore_work, restore_process_worker); |
568 | process->last_restore_timestamp = get_jiffies_64(); |
569 | |
570 | err = kfd_process_init_cwsr_apu(process, filep); |
571 | if (err) |
572 | goto err_init_cwsr; |
573 | |
574 | return process; |
575 | |
576 | err_init_cwsr: |
577 | kfd_process_free_outstanding_kfd_bos(process); |
578 | kfd_process_destroy_pdds(process); |
579 | err_init_apertures: |
580 | pqm_uninit(&process->pqm); |
581 | err_process_pqm_init: |
582 | hash_del_rcu(&process->kfd_processes); |
583 | synchronize_rcu(); |
584 | mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm); |
585 | err_mmu_notifier: |
586 | mutex_destroy(&process->mutex); |
587 | kfd_free_process_doorbells(process); |
588 | err_alloc_doorbells: |
589 | kfd_pasid_free(process->pasid); |
590 | err_alloc_pasid: |
591 | kfree(process); |
592 | err_alloc_process: |
593 | return ERR_PTR(err); |
594 | } |
595 | |
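/* init_doorbell_bitmap - Allocate the per-process doorbell bitmap and
 * mark the doorbells reserved for non-CP engines on SOC15 ASICs.
 */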
596 | static int init_doorbell_bitmap(struct qcm_process_device *qpd, |
597 | struct kfd_dev *dev) |
598 | { |
599 | unsigned int i; |
600 | |
601 | if (!KFD_IS_SOC15(dev->device_info->asic_family)) |
602 | return 0; |
603 | |
604 | qpd->doorbell_bitmap = |
605 | kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, |
606 | BITS_PER_BYTE), GFP_KERNEL); |
607 | if (!qpd->doorbell_bitmap) |
608 | return -ENOMEM; |
609 | |
610 | /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */ |
611 | for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { |
612 | if (i >= dev->shared_resources.non_cp_doorbells_start |
613 | && i <= dev->shared_resources.non_cp_doorbells_end) { |
614 | set_bit(i, qpd->doorbell_bitmap); |
615 | set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, |
616 | qpd->doorbell_bitmap); |
			pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
				 i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
619 | } |
620 | } |
621 | |
622 | return 0; |
623 | } |
624 | |
625 | struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, |
626 | struct kfd_process *p) |
627 | { |
628 | struct kfd_process_device *pdd = NULL; |
629 | |
630 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) |
631 | if (pdd->dev == dev) |
632 | return pdd; |
633 | |
634 | return NULL; |
635 | } |
636 | |
637 | struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, |
638 | struct kfd_process *p) |
639 | { |
640 | struct kfd_process_device *pdd = NULL; |
641 | |
642 | pdd = kzalloc(sizeof(*pdd), GFP_KERNEL); |
643 | if (!pdd) |
644 | return NULL; |
645 | |
646 | if (init_doorbell_bitmap(&pdd->qpd, dev)) { |
		pr_err("Failed to init doorbell for process\n");
648 | kfree(pdd); |
649 | return NULL; |
650 | } |
651 | |
652 | pdd->dev = dev; |
653 | INIT_LIST_HEAD(&pdd->qpd.queues_list); |
654 | INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); |
655 | pdd->qpd.dqm = dev->dqm; |
656 | pdd->qpd.pqm = &p->pqm; |
657 | pdd->qpd.evicted = 0; |
658 | pdd->process = p; |
659 | pdd->bound = PDD_UNBOUND; |
660 | pdd->already_dequeued = false; |
661 | list_add(&pdd->per_device_list, &p->per_device_data); |
662 | |
663 | /* Init idr used for memory handle translation */ |
664 | idr_init(&pdd->alloc_idr); |
665 | |
666 | return pdd; |
667 | } |
668 | |
669 | /** |
670 | * kfd_process_device_init_vm - Initialize a VM for a process-device |
671 | * |
672 | * @pdd: The process-device |
673 | * @drm_file: Optional pointer to a DRM file descriptor |
674 | * |
675 | * If @drm_file is specified, it will be used to acquire the VM from |
676 | * that file descriptor. If successful, the @pdd takes ownership of |
677 | * the file descriptor. |
678 | * |
679 | * If @drm_file is NULL, a new VM is created. |
680 | * |
681 | * Returns 0 on success, -errno on failure. |
682 | */ |
683 | int kfd_process_device_init_vm(struct kfd_process_device *pdd, |
684 | struct file *drm_file) |
685 | { |
686 | struct kfd_process *p; |
687 | struct kfd_dev *dev; |
688 | int ret; |
689 | |
690 | if (pdd->vm) |
691 | return drm_file ? -EBUSY : 0; |
692 | |
693 | p = pdd->process; |
694 | dev = pdd->dev; |
695 | |
696 | if (drm_file) |
697 | ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( |
698 | dev->kgd, drm_file, p->pasid, |
699 | &pdd->vm, &p->kgd_process_info, &p->ef); |
700 | else |
701 | ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid, |
702 | &pdd->vm, &p->kgd_process_info, &p->ef); |
703 | if (ret) { |
		pr_err("Failed to create process VM object\n");
705 | return ret; |
706 | } |
707 | |
708 | ret = kfd_process_device_reserve_ib_mem(pdd); |
709 | if (ret) |
710 | goto err_reserve_ib_mem; |
711 | ret = kfd_process_device_init_cwsr_dgpu(pdd); |
712 | if (ret) |
713 | goto err_init_cwsr; |
714 | |
715 | pdd->drm_file = drm_file; |
716 | |
717 | return 0; |
718 | |
719 | err_init_cwsr: |
720 | err_reserve_ib_mem: |
721 | kfd_process_device_free_bos(pdd); |
722 | if (!drm_file) |
723 | amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm); |
724 | pdd->vm = NULL; |
725 | |
726 | return ret; |
727 | } |
728 | |
729 | /* |
730 | * Direct the IOMMU to bind the process (specifically the pasid->mm) |
731 | * to the device. |
732 | * Unbinding occurs when the process dies or the device is removed. |
733 | * |
734 | * Assumes that the process lock is held. |
735 | */ |
736 | struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, |
737 | struct kfd_process *p) |
738 | { |
739 | struct kfd_process_device *pdd; |
740 | int err; |
741 | |
742 | pdd = kfd_get_process_device_data(dev, p); |
743 | if (!pdd) { |
		pr_err("Process device data doesn't exist\n");
745 | return ERR_PTR(-ENOMEM); |
746 | } |
747 | |
748 | err = kfd_iommu_bind_process_to_device(pdd); |
749 | if (err) |
750 | return ERR_PTR(err); |
751 | |
752 | err = kfd_process_device_init_vm(pdd, NULL); |
753 | if (err) |
754 | return ERR_PTR(err); |
755 | |
756 | return pdd; |
757 | } |
758 | |
759 | struct kfd_process_device *kfd_get_first_process_device_data( |
760 | struct kfd_process *p) |
761 | { |
762 | return list_first_entry(&p->per_device_data, |
763 | struct kfd_process_device, |
764 | per_device_list); |
765 | } |
766 | |
767 | struct kfd_process_device *kfd_get_next_process_device_data( |
768 | struct kfd_process *p, |
769 | struct kfd_process_device *pdd) |
770 | { |
771 | if (list_is_last(&pdd->per_device_list, &p->per_device_data)) |
772 | return NULL; |
773 | return list_next_entry(pdd, per_device_list); |
774 | } |
775 | |
776 | bool kfd_has_process_device_data(struct kfd_process *p) |
777 | { |
	return !list_empty(&p->per_device_data);
779 | } |
780 | |
781 | /* Create specific handle mapped to mem from process local memory idr |
782 | * Assumes that the process lock is held. |
783 | */ |
784 | int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd, |
785 | void *mem) |
786 | { |
787 | return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL); |
788 | } |
789 | |
790 | /* Translate specific handle from process local memory idr |
791 | * Assumes that the process lock is held. |
792 | */ |
793 | void *kfd_process_device_translate_handle(struct kfd_process_device *pdd, |
794 | int handle) |
795 | { |
796 | if (handle < 0) |
797 | return NULL; |
798 | |
799 | return idr_find(&pdd->alloc_idr, handle); |
800 | } |
801 | |
802 | /* Remove specific handle from process local memory idr |
803 | * Assumes that the process lock is held. |
804 | */ |
805 | void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd, |
806 | int handle) |
807 | { |
808 | if (handle >= 0) |
809 | idr_remove(&pdd->alloc_idr, handle); |
810 | } |
811 | |
812 | /* This increments the process->ref counter. */ |
813 | struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid) |
814 | { |
815 | struct kfd_process *p, *ret_p = NULL; |
816 | unsigned int temp; |
817 | |
818 | int idx = srcu_read_lock(&kfd_processes_srcu); |
819 | |
820 | hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { |
821 | if (p->pasid == pasid) { |
822 | kref_get(&p->ref); |
823 | ret_p = p; |
824 | break; |
825 | } |
826 | } |
827 | |
828 | srcu_read_unlock(&kfd_processes_srcu, idx); |
829 | |
830 | return ret_p; |
831 | } |
832 | |
833 | /* This increments the process->ref counter. */ |
834 | struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm) |
835 | { |
836 | struct kfd_process *p; |
837 | |
838 | int idx = srcu_read_lock(&kfd_processes_srcu); |
839 | |
840 | p = find_process_by_mm(mm); |
841 | if (p) |
842 | kref_get(&p->ref); |
843 | |
844 | srcu_read_unlock(&kfd_processes_srcu, idx); |
845 | |
846 | return p; |
847 | } |
848 | |
/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
854 | int kfd_process_evict_queues(struct kfd_process *p) |
855 | { |
856 | struct kfd_process_device *pdd; |
857 | int r = 0; |
858 | unsigned int n_evicted = 0; |
859 | |
860 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) { |
861 | r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, |
862 | &pdd->qpd); |
863 | if (r) { |
			pr_err("Failed to evict process queues\n");
865 | goto fail; |
866 | } |
867 | n_evicted++; |
868 | } |
869 | |
870 | return r; |
871 | |
872 | fail: |
873 | /* To keep state consistent, roll back partial eviction by |
874 | * restoring queues |
875 | */ |
876 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) { |
877 | if (n_evicted == 0) |
878 | break; |
879 | if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, |
880 | &pdd->qpd)) |
			pr_err("Failed to restore queues\n");
882 | |
883 | n_evicted--; |
884 | } |
885 | |
886 | return r; |
887 | } |
888 | |
/* kfd_process_restore_queues - Restore all user queues of a process */
890 | int kfd_process_restore_queues(struct kfd_process *p) |
891 | { |
892 | struct kfd_process_device *pdd; |
893 | int r, ret = 0; |
894 | |
895 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) { |
896 | r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, |
897 | &pdd->qpd); |
898 | if (r) { |
			pr_err("Failed to restore process queues\n");
900 | if (!ret) |
901 | ret = r; |
902 | } |
903 | } |
904 | |
905 | return ret; |
906 | } |
907 | |
908 | static void evict_process_worker(struct work_struct *work) |
909 | { |
910 | int ret; |
911 | struct kfd_process *p; |
912 | struct delayed_work *dwork; |
913 | |
914 | dwork = to_delayed_work(work); |
915 | |
	/* Process termination cancels or waits for this work item, so
	 * kfd_process p stays valid for the lifetime of this worker
	 */
919 | p = container_of(dwork, struct kfd_process, eviction_work); |
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");
922 | |
	/* A narrow window of overlap between the restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, the process can be evicted again even
	 * though restore still has a few more steps to finish. So wait for
	 * any previous restore work to complete first.
	 */
929 | flush_delayed_work(&p->restore_work); |
930 | |
	pr_debug("Started evicting pasid %d\n", p->pasid);
932 | ret = kfd_process_evict_queues(p); |
933 | if (!ret) { |
934 | dma_fence_signal(p->ef); |
935 | dma_fence_put(p->ef); |
936 | p->ef = NULL; |
937 | queue_delayed_work(kfd_restore_wq, &p->restore_work, |
938 | msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); |
939 | |
		pr_debug("Finished evicting pasid %d\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid %d\n", p->pasid);
943 | } |
944 | |
945 | static void restore_process_worker(struct work_struct *work) |
946 | { |
947 | struct delayed_work *dwork; |
948 | struct kfd_process *p; |
949 | struct kfd_process_device *pdd; |
950 | int ret = 0; |
951 | |
952 | dwork = to_delayed_work(work); |
953 | |
	/* Process termination cancels or waits for this work item, so
	 * kfd_process p stays valid for the lifetime of this worker
	 */
957 | p = container_of(dwork, struct kfd_process, restore_work); |
958 | |
	/* Call restore_process_bos on the first KGD device. This function
	 * takes care of restoring the whole process including other devices.
	 * Restore can fail if not enough memory is available. If so,
	 * reschedule again.
	 */
964 | pdd = list_first_entry(&p->per_device_data, |
965 | struct kfd_process_device, |
966 | per_device_list); |
967 | |
	pr_debug("Started restoring pasid %d\n", p->pasid);
969 | |
	/* Set last_restore_timestamp before the restore actually succeeds.
	 * Otherwise it would have to be set by KGD (restore_process_bos)
	 * before the KFD BOs are unreserved. If not, the process could be
	 * evicted again before the timestamp is set.
	 * If restore fails, the timestamp is simply set again on the next
	 * attempt. This means the minimum GPU quantum would be
	 * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
	 * following two functions.
	 */
979 | |
980 | p->last_restore_timestamp = get_jiffies_64(); |
981 | ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, |
982 | &p->ef); |
983 | if (ret) { |
		pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
986 | ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, |
987 | msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)); |
		WARN(!ret, "reschedule restore work failed\n");
989 | return; |
990 | } |
991 | |
992 | ret = kfd_process_restore_queues(p); |
	if (!ret)
		pr_debug("Finished restoring pasid %d\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid %d\n", p->pasid);
997 | } |
998 | |
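/* kfd_suspend_all_processes - Evict the queues of every known process
 * and signal their eviction fences. Pending eviction and restore work
 * is cancelled first.
 */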
999 | void kfd_suspend_all_processes(void) |
1000 | { |
1001 | struct kfd_process *p; |
1002 | unsigned int temp; |
1003 | int idx = srcu_read_lock(&kfd_processes_srcu); |
1004 | |
1005 | hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { |
1006 | cancel_delayed_work_sync(&p->eviction_work); |
1007 | cancel_delayed_work_sync(&p->restore_work); |
1008 | |
1009 | if (kfd_process_evict_queues(p)) |
			pr_err("Failed to suspend process %d\n", p->pasid);
1011 | dma_fence_signal(p->ef); |
1012 | dma_fence_put(p->ef); |
1013 | p->ef = NULL; |
1014 | } |
1015 | srcu_read_unlock(&kfd_processes_srcu, idx); |
1016 | } |
1017 | |
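/* kfd_resume_all_processes - Schedule restore work for every known
 * process. Returns -EFAULT if the restore work could not be queued for
 * any process.
 */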
1018 | int kfd_resume_all_processes(void) |
1019 | { |
1020 | struct kfd_process *p; |
1021 | unsigned int temp; |
1022 | int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu); |
1023 | |
1024 | hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { |
1025 | if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) { |
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
1028 | ret = -EFAULT; |
1029 | } |
1030 | } |
1031 | srcu_read_unlock(&kfd_processes_srcu, idx); |
1032 | return ret; |
1033 | } |
1034 | |
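/* kfd_reserved_mem_mmap - Allocate the per-process CWSR buffer and map
 * it into the process address space (used on APUs, see
 * kfd_process_init_cwsr_apu).
 */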
1035 | int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, |
1036 | struct vm_area_struct *vma) |
1037 | { |
1038 | struct kfd_process_device *pdd; |
1039 | struct qcm_process_device *qpd; |
1040 | |
1041 | if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) { |
		pr_err("Incorrect CWSR mapping size.\n");
1043 | return -EINVAL; |
1044 | } |
1045 | |
1046 | pdd = kfd_get_process_device_data(dev, process); |
1047 | if (!pdd) |
1048 | return -EINVAL; |
1049 | qpd = &pdd->qpd; |
1050 | |
1051 | qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
1052 | get_order(KFD_CWSR_TBA_TMA_SIZE)); |
1053 | if (!qpd->cwsr_kaddr) { |
		pr_err("Error allocating per process CWSR buffer.\n");
1055 | return -ENOMEM; |
1056 | } |
1057 | |
1058 | vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
1059 | | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP; |
	/* Map the pages into the user process */
1061 | return remap_pfn_range(vma, vma->vm_start, |
1062 | PFN_DOWN(__pa(qpd->cwsr_kaddr)), |
1063 | KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot); |
1064 | } |
1065 | |
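/* kfd_flush_tlb - Invalidate the GPU TLB entries of a process on one
 * device. Without HWS the assigned VMID is invalidated directly; with
 * HWS the invalidation is done by PASID.
 */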
1066 | void kfd_flush_tlb(struct kfd_process_device *pdd) |
1067 | { |
1068 | struct kfd_dev *dev = pdd->dev; |
1069 | const struct kfd2kgd_calls *f2g = dev->kfd2kgd; |
1070 | |
1071 | if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { |
1072 | /* Nothing to flush until a VMID is assigned, which |
1073 | * only happens when the first queue is created. |
1074 | */ |
1075 | if (pdd->qpd.vmid) |
1076 | f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid); |
1077 | } else { |
1078 | f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid); |
1079 | } |
1080 | } |
1081 | |
1082 | #if defined(CONFIG_DEBUG_FS) |
1083 | |
1084 | int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data) |
1085 | { |
1086 | struct kfd_process *p; |
1087 | unsigned int temp; |
1088 | int r = 0; |
1089 | |
1090 | int idx = srcu_read_lock(&kfd_processes_srcu); |
1091 | |
1092 | hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { |
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);
1095 | |
1096 | mutex_lock(&p->mutex); |
1097 | r = pqm_debugfs_mqds(m, &p->pqm); |
1098 | mutex_unlock(&p->mutex); |
1099 | |
1100 | if (r) |
1101 | break; |
1102 | } |
1103 | |
1104 | srcu_read_unlock(&kfd_processes_srcu, idx); |
1105 | |
1106 | return r; |
1107 | } |
1108 | |
1109 | #endif |
1110 | |