// SPDX-License-Identifier: MIT
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_pcie.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <drm/ttm/ttm_tt.h>
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu_reset.h"
/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate the worst-case amount of memory to reserve for page tables.
 */
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

	ret = kgd2kfd_init();
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe(adev, vf);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 * setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_kernel_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->enable_mes) {
		/*
		 * With MES enabled, we only need to initialize
		 * the base address. The size and offset are
		 * not initialized as AMDGPU manages the whole
		 * doorbell space.
		 */
		*aperture_base = adev->doorbell.base;
		*aperture_size = 0;
		*start_offset = 0;
	} else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells *
						sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
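
/*
 * Worked example for the split above (illustrative numbers, not taken
 * from any real ASIC): with a 4 MiB doorbell BAR and
 * num_kernel_doorbells = 1024, amdgpu keeps the first
 * 1024 * sizeof(u32) = 4096 bytes for its own rings, so amdkfd sees
 * aperture_size = 4 MiB with start_offset = 4096 and owns everything
 * past that offset for its user-mode queues.
 */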


static void amdgpu_amdkfd_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  kfd.reset_work);

	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	amdgpu_amdkfd_gpuvm_init_mem_limits();

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
			.enable_mes = adev->enable_mes,
		};
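
		/*
		 * Sketch of the compute_vmid_bitmap math above (illustrative
		 * values): with AMDGPU_NUM_VMID = 16 and first_kfd_vmid = 8,
		 * ((1 << 16) - 1) - ((1 << 8) - 1) = 0xFFFF - 0x00FF = 0xFF00,
		 * i.e. the upper half of the VMIDs (8..15) is handed to KFD
		 * while amdgpu keeps VMIDs 0..7 for graphics.
		 */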

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec_bitmap[0].queue_bitmap,
				  AMDGPU_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < AMDGPU_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
					     &gpu_resources.doorbell_physical_address,
					     &gpu_resources.doorbell_aperture_size,
					     &gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to the CP engine, the lower
		 * 12 bits of its address have to be outside the ranges
		 * set for the SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
							      &gpu_resources);

		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;

		INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
	}
}

void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
		amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_reset_domain_schedule(adev->reset_domain,
					     &adev->kfd.reset_work);
}

int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}
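
/*
 * Usage sketch (hypothetical caller, for illustration only): allocate a
 * page of pinned, kernel-mapped GTT memory for a queue descriptor and
 * release it again with amdgpu_amdkfd_free_gtt_mem():
 *
 *	void *mem_obj;
 *	uint64_t gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_amdkfd_alloc_gtt_mem(adev, PAGE_SIZE, &mem_obj,
 *					&gpu_addr, &cpu_ptr, false);
 *	if (!r) {
 *		memset(cpu_ptr, 0, PAGE_SIZE);
 *		...
 *		amdgpu_amdkfd_free_gtt_mem(adev, mem_obj);
 *	}
 */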

void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
}

int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
			    void **mem_obj)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	bo = &ubo->bo;
	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type)
{
	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}

	return 0;
}

void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp)
{
	memset(mem_info, 0, sizeof(*mem_info));

	if (xcp) {
		if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size)
			mem_info->local_mem_size_public =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
		else
			mem_info->local_mem_size_private =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
	} else {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
		 &adev->gmc.aper_base,
		 mem_info->local_mem_size_public,
		 mem_info->local_mem_size_private);

	if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
	/* the sclk is in quanta of 10 kHz */
	if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dmabuf_adev)
		*dmabuf_adev = adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}
	if (xcp_id)
		*xcp_id = bo->xcp_id;

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src)
{
	struct amdgpu_device *peer_adev = src;
	struct amdgpu_device *adev = dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min)
{
	struct amdgpu_device *adev = dst, *peer_adev;
	int num_links;

	if (adev->asic_type != CHIP_ALDEBARAN)
		return 0;

	if (src)
		peer_adev = src;

	/* num links returns 0 for indirect peers since indirect route is unknown. */
	num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
	if (num_links < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, num_links);
		num_links = 0;
	}

	/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
	return (num_links * 16 * 25000)/BITS_PER_BYTE;
}
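
/*
 * Worked example for the formula above: with is_min == false and a single
 * direct xGMI link, num_links = 1, so the reported bandwidth is
 * 1 * 16 lanes * 25000 Mbps / 8 bits-per-byte = 50000 MB/s. With
 * is_min == true, num_links is pinned to 1 and the same per-link minimum
 * is returned; multiple direct links scale the result linearly.
 */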

int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
			       fls(adev->pm.pcie_mlw_mask)) - 1;
	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
			       fls(adev->pm.pcie_gen_mask &
				   CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
	uint32_t num_lanes_mask = 1 << num_lanes_shift;
	uint32_t gen_speed_mask = 1 << gen_speed_shift;
	int num_lanes_factor = 0, gen_speed_mbits_factor = 0;

	switch (num_lanes_mask) {
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
		num_lanes_factor = 1;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
		num_lanes_factor = 2;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
		num_lanes_factor = 4;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
		num_lanes_factor = 8;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
		num_lanes_factor = 12;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
		num_lanes_factor = 16;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
		num_lanes_factor = 32;
		break;
	}

	switch (gen_speed_mask) {
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
		gen_speed_mbits_factor = 2500;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
		gen_speed_mbits_factor = 5000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
		gen_speed_mbits_factor = 8000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
		gen_speed_mbits_factor = 16000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
		gen_speed_mbits_factor = 32000;
		break;
	}

	return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}
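
/*
 * Worked example for the return value above (illustrative link config):
 * a Gen4 x16 link gives num_lanes_factor = 16 and
 * gen_speed_mbits_factor = 16000, so the estimate is
 * 16 * 16000 / 8 = 32000 MB/s of raw link bandwidth. This is a raw
 * transfer-rate figure; encoding and protocol overhead are ignored.
 */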

int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
			    enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;
	job->num_ibs = 1;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);

	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	/* Drop the initial kref_init count (see drm_sched_main as example) */
	dma_fence_put(f);
	ret = dma_fence_wait(f, false);

err_ib_sched:
	amdgpu_job_free(job);
err:
	return ret;
}
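
/*
 * Usage sketch (hypothetical caller, illustrative values only): submit a
 * small command buffer on the first compute ring under VMID 8 and wait
 * for it to retire:
 *
 *	uint32_t ib_cmd[16];	(packets built by the caller)
 *	int r;
 *
 *	r = amdgpu_amdkfd_submit_ib(adev, KGD_ENGINE_MEC1, 8,
 *				    ib_gpu_addr, ib_cmd, ARRAY_SIZE(ib_cmd));
 */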

void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
	enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
	/* Temporary workaround to fix issues observed in some
	 * compute applications when GFXOFF is enabled on GFX11.
	 */
	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11) {
		pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
		amdgpu_gfx_off_ctrl(adev, idle);
	} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
		   (adev->flags & AMD_IS_APU)) {
		/* Disable GFXOFF and PG. Temporary workaround
		 * to fix issues seen in some compute applications on GFX9.
		 */
		adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
	}
	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
				     uint16_t vmid)
{
	if (adev->family == AMDGPU_FAMILY_AI) {
		int i;

		for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
	} else {
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0);
	}

	return 0;
}

int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
				      uint16_t pasid,
				      enum TLB_FLUSH_TYPE flush_type,
				      uint32_t inst)
{
	bool all_hub = false;

	if (adev->family == AMDGPU_FAMILY_AI ||
	    adev->family == AMDGPU_FAMILY_RV)
		all_hub = true;

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst);
}

bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
	return adev->have_atomics_support;
}

void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
{
	amdgpu_device_flush_hdp(adev, NULL);
}

void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
{
	amdgpu_umc_poison_handler(adev, reset);
}

int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					     uint32_t *payload)
{
	int ret;

	/* Bail out if the device or the IH ring is not ready. */
	ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih);
	if (ret)
		return ret;

	/* Send payload to fence KFD interrupts */
	amdgpu_amdkfd_interrupt(adev, payload);

	return 0;
}

bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
{
	if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
		return adev->gfx.ras->query_utcl2_poison_status(adev);
	else
		return false;
}

int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
	return kgd2kfd_check_and_lock_kfd();
}

void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
	kgd2kfd_unlock_kfd();
}


u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
{
	s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
	u64 tmp;

	if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
		if (adev->gmc.is_app_apu && adev->gmc.num_mem_partitions == 1) {
			/* In NPS1 mode, restrict the VRAM reporting to the
			 * ttm_pages_limit, which defaults to 1/2 of system
			 * memory. In other partition modes the HBM is already
			 * uniformly divided across the reported NUMA nodes.
			 * Users who want to go beyond the default ttm limit
			 * and maximize ROCm allocations can raise it up to
			 * the max ttm and sysmem limits.
			 */

			tmp = (ttm_tt_pages_limit() << PAGE_SHIFT) / num_online_nodes();
		} else {
			tmp = adev->gmc.mem_partitions[mem_id].size;
		}
		do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
		return ALIGN_DOWN(tmp, PAGE_SIZE);
	} else {
		return adev->gmc.real_vram_size;
	}
}
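
/*
 * Worked example (illustrative numbers): on an NPS1 app APU with 256 GiB
 * of system memory, ttm_tt_pages_limit() defaults to half of that, so
 * tmp = 128 GiB / num_online_nodes(). With one online NUMA node and four
 * XCPs sharing the partition, each XCP is reported
 * 128 GiB / 4 = 32 GiB, aligned down to a page boundary.
 */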

int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
			    u32 inst)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	struct amdgpu_ring_funcs *ring_funcs;
	struct amdgpu_ring *ring;
	int r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
	if (!ring_funcs)
		return -ENOMEM;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		r = -ENOMEM;
		goto free_ring_funcs;
	}

	ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
	ring->doorbell_index = doorbell_off;
	ring->funcs = ring_funcs;

	spin_lock(&kiq->ring_lock);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock(&kiq->ring_lock);
		r = -ENOMEM;
		goto free_ring;
	}

	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);

	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);

	spin_unlock(&kiq->ring_lock);

free_ring:
	kfree(ring);

free_ring_funcs:
	kfree(ring_funcs);

	return r;
}