1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
2 | /* |
3 | * Copyright 2014-2022 Advanced Micro Devices, Inc. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | */ |
24 | |
25 | #include <linux/ratelimit.h> |
26 | #include <linux/printk.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/list.h> |
29 | #include <linux/types.h> |
30 | #include <linux/bitops.h> |
31 | #include <linux/sched.h> |
32 | #include "kfd_priv.h" |
33 | #include "kfd_device_queue_manager.h" |
34 | #include "kfd_mqd_manager.h" |
35 | #include "cik_regs.h" |
36 | #include "kfd_kernel_queue.h" |
37 | #include "amdgpu_amdkfd.h" |
38 | #include "mes_api_def.h" |
39 | #include "kfd_debug.h" |
40 | |
41 | /* Size of the per-pipe EOP queue */ |
42 | #define CIK_HPD_EOP_BYTES_LOG2 11 |
43 | #define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2) |
44 | |
45 | static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, |
46 | u32 pasid, unsigned int vmid); |
47 | |
48 | static int execute_queues_cpsch(struct device_queue_manager *dqm, |
49 | enum kfd_unmap_queues_filter filter, |
50 | uint32_t filter_param, |
51 | uint32_t grace_period); |
52 | static int unmap_queues_cpsch(struct device_queue_manager *dqm, |
53 | enum kfd_unmap_queues_filter filter, |
54 | uint32_t filter_param, |
55 | uint32_t grace_period, |
56 | bool reset); |
57 | |
58 | static int map_queues_cpsch(struct device_queue_manager *dqm); |
59 | |
60 | static void deallocate_sdma_queue(struct device_queue_manager *dqm, |
61 | struct queue *q); |
62 | |
63 | static inline void deallocate_hqd(struct device_queue_manager *dqm, |
64 | struct queue *q); |
65 | static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q); |
66 | static int allocate_sdma_queue(struct device_queue_manager *dqm, |
67 | struct queue *q, const uint32_t *restore_sdma_id); |
68 | static void kfd_process_hw_exception(struct work_struct *work); |
69 | |
70 | static inline |
71 | enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) |
72 | { |
73 | if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI) |
74 | return KFD_MQD_TYPE_SDMA; |
75 | return KFD_MQD_TYPE_CP; |
76 | } |
77 | |
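/* A pipe is considered enabled for KFD if at least one of its queues is
 * marked as available in the shared cp_queue_bitmap.
 */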
78 | static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) |
79 | { |
80 | int i; |
81 | int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec |
82 | + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe; |
83 | |
84 | /* queue is available for KFD usage if bit is 1 */ |
85 | for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i) |
86 | if (test_bit(pipe_offset + i, |
87 | dqm->dev->kfd->shared_resources.cp_queue_bitmap)) |
88 | return true; |
89 | return false; |
90 | } |
91 | |
92 | unsigned int get_cp_queues_num(struct device_queue_manager *dqm) |
93 | { |
94 | return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap, |
95 | AMDGPU_MAX_QUEUES); |
96 | } |
97 | |
98 | unsigned int get_queues_per_pipe(struct device_queue_manager *dqm) |
99 | { |
100 | return dqm->dev->kfd->shared_resources.num_queue_per_pipe; |
101 | } |
102 | |
103 | unsigned int get_pipes_per_mec(struct device_queue_manager *dqm) |
104 | { |
105 | return dqm->dev->kfd->shared_resources.num_pipe_per_mec; |
106 | } |
107 | |
108 | static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) |
109 | { |
110 | return kfd_get_num_sdma_engines(dqm->dev) + |
111 | kfd_get_num_xgmi_sdma_engines(dqm->dev); |
112 | } |
113 | |
114 | unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) |
115 | { |
116 | return kfd_get_num_sdma_engines(dqm->dev) * |
117 | dqm->dev->kfd->device_info.num_sdma_queues_per_engine; |
118 | } |
119 | |
120 | unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) |
121 | { |
122 | return kfd_get_num_xgmi_sdma_engines(dqm->dev) * |
123 | dqm->dev->kfd->device_info.num_sdma_queues_per_engine; |
124 | } |
125 | |
126 | static void init_sdma_bitmaps(struct device_queue_manager *dqm) |
127 | { |
128 | bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES); |
129 | bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm)); |
130 | |
131 | bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); |
132 | bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm)); |
133 | |
134 | /* Mask out the reserved queues */ |
135 | bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap, |
136 | dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap, |
137 | KFD_MAX_SDMA_QUEUES); |
138 | } |
139 | |
140 | void program_sh_mem_settings(struct device_queue_manager *dqm, |
141 | struct qcm_process_device *qpd) |
142 | { |
143 | uint32_t xcc_mask = dqm->dev->xcc_mask; |
144 | int xcc_id; |
145 | |
146 | for_each_inst(xcc_id, xcc_mask) |
147 | dqm->dev->kfd2kgd->program_sh_mem_settings( |
148 | dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, |
149 | qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, |
150 | qpd->sh_mem_bases, xcc_id); |
151 | } |
152 | |
153 | static void kfd_hws_hang(struct device_queue_manager *dqm) |
154 | { |
155 | /* |
156 | * Issue a GPU reset if HWS is unresponsive |
157 | */ |
158 | dqm->is_hws_hang = true; |
159 | |
160 | /* It's possible we're detecting a HWS hang in the |
161 | * middle of a GPU reset. No need to schedule another |
162 | * reset in this case. |
163 | */ |
164 | if (!dqm->is_resetting) |
165 | schedule_work(&dqm->hw_exception_work); |
166 | } |
167 | |
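/* Map a KFD queue type to the corresponding MES queue type. Only compute
 * and SDMA queues are valid here; anything else returns -EINVAL.
 */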
168 | static int convert_to_mes_queue_type(int queue_type) |
169 | { |
170 | int mes_queue_type; |
171 | |
172 | switch (queue_type) { |
173 | case KFD_QUEUE_TYPE_COMPUTE: |
174 | mes_queue_type = MES_QUEUE_TYPE_COMPUTE; |
175 | break; |
176 | case KFD_QUEUE_TYPE_SDMA: |
177 | mes_queue_type = MES_QUEUE_TYPE_SDMA; |
178 | break; |
179 | default: |
180 | WARN(1, "Invalid queue type %d", queue_type); |
181 | mes_queue_type = -EINVAL; |
182 | break; |
183 | } |
184 | |
185 | return mes_queue_type; |
186 | } |
187 | |
188 | static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, |
189 | struct qcm_process_device *qpd) |
190 | { |
191 | struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev; |
192 | struct kfd_process_device *pdd = qpd_to_pdd(qpd); |
193 | struct mes_add_queue_input queue_input; |
194 | int r, queue_type; |
195 | uint64_t wptr_addr_off; |
196 | |
197 | if (dqm->is_hws_hang) |
198 | return -EIO; |
199 | |
200 | memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); |
201 | queue_input.process_id = qpd->pqm->process->pasid; |
202 | queue_input.page_table_base_addr = qpd->page_table_base; |
203 | queue_input.process_va_start = 0; |
204 | queue_input.process_va_end = adev->vm_manager.max_pfn - 1; |
205 | /* MES unit for quantum is 100ns */ |
206 | queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM; /* Equivalent to 10ms. */ |
207 | queue_input.process_context_addr = pdd->proc_ctx_gpu_addr; |
208 | queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */ |
209 | queue_input.gang_context_addr = q->gang_ctx_gpu_addr; |
210 | queue_input.inprocess_gang_priority = q->properties.priority; |
211 | queue_input.gang_global_priority_level = |
212 | AMDGPU_MES_PRIORITY_LEVEL_NORMAL; |
213 | queue_input.doorbell_offset = q->properties.doorbell_off; |
214 | queue_input.mqd_addr = q->gart_mqd_addr; |
215 | queue_input.wptr_addr = (uint64_t)q->properties.write_ptr; |
216 | |
217 | if (q->wptr_bo) { |
218 | wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1); |
219 | queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off; |
220 | } |
221 | |
222 | queue_input.is_kfd_process = 1; |
223 | queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL); |
224 | queue_input.queue_size = q->properties.queue_size >> 2; |
225 | |
226 | queue_input.paging = false; |
227 | queue_input.tba_addr = qpd->tba_addr; |
228 | queue_input.tma_addr = qpd->tma_addr; |
229 | queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device); |
230 | queue_input.skip_process_ctx_clear = |
231 | qpd->pqm->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED && |
232 | (qpd->pqm->process->debug_trap_enabled || |
233 | kfd_dbg_has_ttmps_always_setup(q->device)); |
234 | |
235 | queue_type = convert_to_mes_queue_type(q->properties.type); |
236 | if (queue_type < 0) { |
237 | dev_err(adev->dev, "Queue type not supported with MES, queue:%d\n", |
238 | q->properties.type); |
239 | return -EINVAL; |
240 | } |
241 | queue_input.queue_type = (uint32_t)queue_type; |
242 | |
243 | queue_input.exclusively_scheduled = q->properties.is_gws; |
244 | |
245 | amdgpu_mes_lock(&adev->mes); |
246 | r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input); |
247 | amdgpu_mes_unlock(&adev->mes); |
248 | if (r) { |
249 | dev_err(adev->dev, "failed to add hardware queue to MES, doorbell=0x%x\n", |
250 | q->properties.doorbell_off); |
251 | dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n"); |
252 | kfd_hws_hang(dqm); |
253 | } |
254 | |
255 | return r; |
256 | } |
257 | |
258 | static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q, |
259 | struct qcm_process_device *qpd) |
260 | { |
261 | struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev; |
262 | int r; |
263 | struct mes_remove_queue_input queue_input; |
264 | |
265 | if (dqm->is_hws_hang) |
266 | return -EIO; |
267 | |
268 | memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input)); |
269 | queue_input.doorbell_offset = q->properties.doorbell_off; |
270 | queue_input.gang_context_addr = q->gang_ctx_gpu_addr; |
271 | |
272 | amdgpu_mes_lock(&adev->mes); |
273 | r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input); |
274 | amdgpu_mes_unlock(&adev->mes); |
275 | |
276 | if (r) { |
277 | dev_err(adev->dev, "failed to remove hardware queue from MES, doorbell=0x%x\n", |
278 | q->properties.doorbell_off); |
279 | dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n"); |
280 | kfd_hws_hang(dqm); |
281 | } |
282 | |
283 | return r; |
284 | } |
285 | |
286 | static int remove_all_queues_mes(struct device_queue_manager *dqm) |
287 | { |
288 | struct device_process_node *cur; |
289 | struct device *dev = dqm->dev->adev->dev; |
290 | struct qcm_process_device *qpd; |
291 | struct queue *q; |
292 | int retval = 0; |
293 | |
294 | list_for_each_entry(cur, &dqm->queues, list) { |
295 | qpd = cur->qpd; |
296 | list_for_each_entry(q, &qpd->queues_list, list) { |
297 | if (q->properties.is_active) { |
298 | retval = remove_queue_mes(dqm, q, qpd); |
299 | if (retval) { |
300 | dev_err(dev, "%s: Failed to remove queue %d for dev %d", |
301 | __func__, |
302 | q->properties.queue_id, |
303 | dqm->dev->id); |
304 | return retval; |
305 | } |
306 | } |
307 | } |
308 | } |
309 | |
310 | return retval; |
311 | } |
312 | |
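/* Bookkeeping helpers: track total active queues, active CP queues and
 * GWS queues per device, plus the per-process mapped_gws_queue flag.
 */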
313 | static void increment_queue_count(struct device_queue_manager *dqm, |
314 | struct qcm_process_device *qpd, |
315 | struct queue *q) |
316 | { |
317 | dqm->active_queue_count++; |
318 | if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || |
319 | q->properties.type == KFD_QUEUE_TYPE_DIQ) |
320 | dqm->active_cp_queue_count++; |
321 | |
322 | if (q->properties.is_gws) { |
323 | dqm->gws_queue_count++; |
324 | qpd->mapped_gws_queue = true; |
325 | } |
326 | } |
327 | |
328 | static void decrement_queue_count(struct device_queue_manager *dqm, |
329 | struct qcm_process_device *qpd, |
330 | struct queue *q) |
331 | { |
332 | dqm->active_queue_count--; |
333 | if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || |
334 | q->properties.type == KFD_QUEUE_TYPE_DIQ) |
335 | dqm->active_cp_queue_count--; |
336 | |
337 | if (q->properties.is_gws) { |
338 | dqm->gws_queue_count--; |
339 | qpd->mapped_gws_queue = false; |
340 | } |
341 | } |
342 | |
343 | /* |
344 | * Allocate a doorbell ID to this queue. |
345 | * If doorbell_id is passed in, make sure requested ID is valid then allocate it. |
346 | */ |
347 | static int allocate_doorbell(struct qcm_process_device *qpd, |
348 | struct queue *q, |
349 | uint32_t const *restore_id) |
350 | { |
351 | struct kfd_node *dev = qpd->dqm->dev; |
352 | |
353 | if (!KFD_IS_SOC15(dev)) { |
354 | /* On pre-SOC15 chips we need to use the queue ID to |
355 | * preserve the user mode ABI. |
356 | */ |
357 | |
358 | if (restore_id && *restore_id != q->properties.queue_id) |
359 | return -EINVAL; |
360 | |
361 | q->doorbell_id = q->properties.queue_id; |
362 | } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA || |
363 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { |
364 | /* For SDMA queues on SOC15 with 8-byte doorbell, use static |
365 | * doorbell assignments based on the engine and queue id. |
366 | * The doorbell index distance between RLC (2*i) and (2*i+1) |
367 | * for an SDMA engine is 512. |
368 | */ |
369 | |
370 | uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx; |
371 | |
372 | /* |
373 | * q->properties.sdma_engine_id corresponds to the virtual |
374 | * sdma engine number. However, for doorbell allocation, |
375 | * we need the physical sdma engine id in order to get the |
376 | * correct doorbell offset. |
377 | */ |
378 | uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id * |
379 | get_num_all_sdma_engines(qpd->dqm) + |
380 | q->properties.sdma_engine_id] |
381 | + (q->properties.sdma_queue_id & 1) |
382 | * KFD_QUEUE_DOORBELL_MIRROR_OFFSET |
383 | + (q->properties.sdma_queue_id >> 1); |
384 | |
385 | if (restore_id && *restore_id != valid_id) |
386 | return -EINVAL; |
387 | q->doorbell_id = valid_id; |
388 | } else { |
389 | /* For CP queues on SOC15 */ |
390 | if (restore_id) { |
391 | /* make sure that ID is free */ |
392 | if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap)) |
393 | return -EINVAL; |
394 | |
395 | q->doorbell_id = *restore_id; |
396 | } else { |
397 | /* or reserve a free doorbell ID */ |
398 | unsigned int found; |
399 | |
400 | found = find_first_zero_bit(qpd->doorbell_bitmap, |
401 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); |
402 | if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { |
403 | pr_debug("No doorbells available"); |
404 | return -EBUSY; |
405 | } |
406 | set_bit(found, qpd->doorbell_bitmap); |
407 | q->doorbell_id = found; |
408 | } |
409 | } |
410 | |
411 | q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev, |
412 | qpd->proc_doorbells, |
413 | q->doorbell_id, |
414 | dev->kfd->device_info.doorbell_size); |
415 | return 0; |
416 | } |
417 | |
418 | static void deallocate_doorbell(struct qcm_process_device *qpd, |
419 | struct queue *q) |
420 | { |
421 | unsigned int old; |
422 | struct kfd_node *dev = qpd->dqm->dev; |
423 | |
424 | if (!KFD_IS_SOC15(dev) || |
425 | q->properties.type == KFD_QUEUE_TYPE_SDMA || |
426 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) |
427 | return; |
428 | |
429 | old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap); |
430 | WARN_ON(!old); |
431 | } |
432 | |
433 | static void program_trap_handler_settings(struct device_queue_manager *dqm, |
434 | struct qcm_process_device *qpd) |
435 | { |
436 | uint32_t xcc_mask = dqm->dev->xcc_mask; |
437 | int xcc_id; |
438 | |
439 | if (dqm->dev->kfd2kgd->program_trap_handler_settings) |
440 | for_each_inst(xcc_id, xcc_mask) |
441 | dqm->dev->kfd2kgd->program_trap_handler_settings( |
442 | dqm->dev->adev, qpd->vmid, qpd->tba_addr, |
443 | qpd->tma_addr, xcc_id); |
444 | } |
445 | |
446 | static int allocate_vmid(struct device_queue_manager *dqm, |
447 | struct qcm_process_device *qpd, |
448 | struct queue *q) |
449 | { |
450 | struct device *dev = dqm->dev->adev->dev; |
451 | int allocated_vmid = -1, i; |
452 | |
453 | for (i = dqm->dev->vm_info.first_vmid_kfd; |
454 | i <= dqm->dev->vm_info.last_vmid_kfd; i++) { |
455 | if (!dqm->vmid_pasid[i]) { |
456 | allocated_vmid = i; |
457 | break; |
458 | } |
459 | } |
460 | |
461 | if (allocated_vmid < 0) { |
462 | dev_err(dev, "no more vmid to allocate\n" ); |
463 | return -ENOSPC; |
464 | } |
465 | |
466 | pr_debug("vmid allocated: %d\n" , allocated_vmid); |
467 | |
468 | dqm->vmid_pasid[allocated_vmid] = q->process->pasid; |
469 | |
470 | set_pasid_vmid_mapping(dqm, pasid: q->process->pasid, vmid: allocated_vmid); |
471 | |
472 | qpd->vmid = allocated_vmid; |
473 | q->properties.vmid = allocated_vmid; |
474 | |
475 | program_sh_mem_settings(dqm, qpd); |
476 | |
477 | if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled) |
478 | program_trap_handler_settings(dqm, qpd); |
479 | |
480 | /* qpd->page_table_base is set earlier when register_process() |
481 | * is called, i.e. when the first queue is created. |
482 | */ |
483 | dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev, |
484 | qpd->vmid, |
485 | qpd->page_table_base); |
486 | /* invalidate the VM context after pasid and vmid mapping is set up */ |
487 | kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY); |
488 | |
489 | if (dqm->dev->kfd2kgd->set_scratch_backing_va) |
490 | dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev, |
491 | qpd->sh_hidden_private_base, qpd->vmid); |
492 | |
493 | return 0; |
494 | } |
495 | |
496 | static int flush_texture_cache_nocpsch(struct kfd_node *kdev, |
497 | struct qcm_process_device *qpd) |
498 | { |
499 | const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf; |
500 | int ret; |
501 | |
502 | if (!qpd->ib_kaddr) |
503 | return -ENOMEM; |
504 | |
505 | ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr); |
506 | if (ret) |
507 | return ret; |
508 | |
509 | return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid, |
510 | qpd->ib_base, (uint32_t *)qpd->ib_kaddr, |
511 | pmf->release_mem_size / sizeof(uint32_t)); |
512 | } |
513 | |
514 | static void deallocate_vmid(struct device_queue_manager *dqm, |
515 | struct qcm_process_device *qpd, |
516 | struct queue *q) |
517 | { |
518 | struct device *dev = dqm->dev->adev->dev; |
519 | |
520 | /* On GFX v7, CP doesn't flush TC at dequeue */ |
521 | if (q->device->adev->asic_type == CHIP_HAWAII) |
522 | if (flush_texture_cache_nocpsch(q->device, qpd)) |
523 | dev_err(dev, "Failed to flush TC\n"); |
524 | |
525 | kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY); |
526 | |
527 | /* Release the vmid mapping */ |
528 | set_pasid_vmid_mapping(dqm, 0, qpd->vmid); |
529 | dqm->vmid_pasid[qpd->vmid] = 0; |
530 | |
531 | qpd->vmid = 0; |
532 | q->properties.vmid = 0; |
533 | } |
534 | |
535 | static int create_queue_nocpsch(struct device_queue_manager *dqm, |
536 | struct queue *q, |
537 | struct qcm_process_device *qpd, |
538 | const struct kfd_criu_queue_priv_data *qd, |
539 | const void *restore_mqd, const void *restore_ctl_stack) |
540 | { |
541 | struct mqd_manager *mqd_mgr; |
542 | int retval; |
543 | |
544 | dqm_lock(dqm); |
545 | |
546 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { |
547 | pr_warn("Can't create new usermode queue because %d queues were already created\n" , |
548 | dqm->total_queue_count); |
549 | retval = -EPERM; |
550 | goto out_unlock; |
551 | } |
552 | |
553 | if (list_empty(&qpd->queues_list)) { |
554 | retval = allocate_vmid(dqm, qpd, q); |
555 | if (retval) |
556 | goto out_unlock; |
557 | } |
558 | q->properties.vmid = qpd->vmid; |
559 | /* |
560 | * Eviction state logic: mark all queues as evicted, even ones |
561 | * not currently active. Restoring inactive queues later only |
562 | * updates the is_evicted flag but is a no-op otherwise. |
563 | */ |
564 | q->properties.is_evicted = !!qpd->evicted; |
565 | |
566 | q->properties.tba_addr = qpd->tba_addr; |
567 | q->properties.tma_addr = qpd->tma_addr; |
568 | |
569 | mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( |
570 | q->properties.type)]; |
571 | if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) { |
572 | retval = allocate_hqd(dqm, q); |
573 | if (retval) |
574 | goto deallocate_vmid; |
575 | pr_debug("Loading mqd to hqd on pipe %d, queue %d\n" , |
576 | q->pipe, q->queue); |
577 | } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA || |
578 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { |
579 | retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL); |
580 | if (retval) |
581 | goto deallocate_vmid; |
582 | dqm->asic_ops.init_sdma_vm(dqm, q, qpd); |
583 | } |
584 | |
585 | retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL); |
586 | if (retval) |
587 | goto out_deallocate_hqd; |
588 | |
589 | /* Temporarily release dqm lock to avoid a circular lock dependency */ |
590 | dqm_unlock(dqm); |
591 | q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties); |
592 | dqm_lock(dqm); |
593 | |
594 | if (!q->mqd_mem_obj) { |
595 | retval = -ENOMEM; |
596 | goto out_deallocate_doorbell; |
597 | } |
598 | |
599 | if (qd) |
600 | mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, |
601 | &q->properties, restore_mqd, restore_ctl_stack, |
602 | qd->ctl_stack_size); |
603 | else |
604 | mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, |
605 | &q->gart_mqd_addr, &q->properties); |
606 | |
607 | if (q->properties.is_active) { |
608 | if (!dqm->sched_running) { |
609 | WARN_ONCE(1, "Load non-HWS mqd while stopped\n" ); |
610 | goto add_queue_to_list; |
611 | } |
612 | |
613 | if (WARN(q->process->mm != current->mm, |
614 | "should only run in user thread" )) |
615 | retval = -EFAULT; |
616 | else |
617 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, |
618 | q->queue, &q->properties, current->mm); |
619 | if (retval) |
620 | goto out_free_mqd; |
621 | } |
622 | |
623 | add_queue_to_list: |
624 | list_add(&q->list, &qpd->queues_list); |
625 | qpd->queue_count++; |
626 | if (q->properties.is_active) |
627 | increment_queue_count(dqm, qpd, q); |
628 | |
629 | /* |
630 | * Unconditionally increment this counter, regardless of the queue's |
631 | * type or whether the queue is active. |
632 | */ |
633 | dqm->total_queue_count++; |
634 | pr_debug("Total of %d queues are accountable so far\n" , |
635 | dqm->total_queue_count); |
636 | goto out_unlock; |
637 | |
638 | out_free_mqd: |
639 | mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); |
640 | out_deallocate_doorbell: |
641 | deallocate_doorbell(qpd, q); |
642 | out_deallocate_hqd: |
643 | if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) |
644 | deallocate_hqd(dqm, q); |
645 | else if (q->properties.type == KFD_QUEUE_TYPE_SDMA || |
646 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) |
647 | deallocate_sdma_queue(dqm, q); |
648 | deallocate_vmid: |
649 | if (list_empty(&qpd->queues_list)) |
650 | deallocate_vmid(dqm, qpd, q); |
651 | out_unlock: |
652 | dqm_unlock(dqm); |
653 | return retval; |
654 | } |
655 | |
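/* Pick a free HQD slot, scanning pipes round-robin starting at
 * next_pipe_to_allocate. A set bit in allocated_queues[pipe] means the
 * corresponding queue slot is free.
 */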
656 | static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q) |
657 | { |
658 | bool set; |
659 | int pipe, bit, i; |
660 | |
661 | set = false; |
662 | |
663 | for (pipe = dqm->next_pipe_to_allocate, i = 0; |
664 | i < get_pipes_per_mec(dqm); |
665 | pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) { |
666 | |
667 | if (!is_pipe_enabled(dqm, 0, pipe)) |
668 | continue; |
669 | |
670 | if (dqm->allocated_queues[pipe] != 0) { |
671 | bit = ffs(dqm->allocated_queues[pipe]) - 1; |
672 | dqm->allocated_queues[pipe] &= ~(1 << bit); |
673 | q->pipe = pipe; |
674 | q->queue = bit; |
675 | set = true; |
676 | break; |
677 | } |
678 | } |
679 | |
680 | if (!set) |
681 | return -EBUSY; |
682 | |
683 | pr_debug("hqd slot - pipe %d, queue %d\n" , q->pipe, q->queue); |
684 | /* horizontal hqd allocation */ |
685 | dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm); |
686 | |
687 | return 0; |
688 | } |
689 | |
690 | static inline void deallocate_hqd(struct device_queue_manager *dqm, |
691 | struct queue *q) |
692 | { |
693 | dqm->allocated_queues[q->pipe] |= (1 << q->queue); |
694 | } |
695 | |
696 | #define SQ_IND_CMD_CMD_KILL 0x00000003 |
697 | #define SQ_IND_CMD_MODE_BROADCAST 0x00000001 |
698 | |
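/* Find the VMID currently mapped to the process' PASID and broadcast a
 * SQ KILL command to all waves of that VMID on every XCC instance.
 */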
699 | static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p) |
700 | { |
701 | int status = 0; |
702 | unsigned int vmid; |
703 | uint16_t queried_pasid; |
704 | union SQ_CMD_BITS reg_sq_cmd; |
705 | union GRBM_GFX_INDEX_BITS reg_gfx_index; |
706 | struct kfd_process_device *pdd; |
707 | int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; |
708 | int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; |
709 | uint32_t xcc_mask = dev->xcc_mask; |
710 | int xcc_id; |
711 | |
712 | reg_sq_cmd.u32All = 0; |
713 | reg_gfx_index.u32All = 0; |
714 | |
715 | pr_debug("Killing all process wavefronts\n" ); |
716 | |
717 | if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) { |
718 | dev_err(dev->adev->dev, "no vmid pasid mapping supported\n" ); |
719 | return -EOPNOTSUPP; |
720 | } |
721 | |
722 | /* Scan all registers in the range ATC_VMID8_PASID_MAPPING .. |
723 | * ATC_VMID15_PASID_MAPPING |
724 | * to check which VMID the current process is mapped to. |
725 | */ |
726 | |
727 | for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { |
728 | status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info |
729 | (dev->adev, vmid, &queried_pasid); |
730 | |
731 | if (status && queried_pasid == p->pasid) { |
732 | pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n" , |
733 | vmid, p->pasid); |
734 | break; |
735 | } |
736 | } |
737 | |
738 | if (vmid > last_vmid_to_scan) { |
739 | dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n" , p->pasid); |
740 | return -EFAULT; |
741 | } |
742 | |
743 | /* taking the VMID for that process on the safe way using PDD */ |
744 | pdd = kfd_get_process_device_data(dev, p); |
745 | if (!pdd) |
746 | return -EFAULT; |
747 | |
748 | reg_gfx_index.bits.sh_broadcast_writes = 1; |
749 | reg_gfx_index.bits.se_broadcast_writes = 1; |
750 | reg_gfx_index.bits.instance_broadcast_writes = 1; |
751 | reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST; |
752 | reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL; |
753 | reg_sq_cmd.bits.vm_id = vmid; |
754 | |
755 | for_each_inst(xcc_id, xcc_mask) |
756 | dev->kfd2kgd->wave_control_execute( |
757 | dev->adev, reg_gfx_index.u32All, |
758 | reg_sq_cmd.u32All, xcc_id); |
759 | |
760 | return 0; |
761 | } |
762 | |
763 | /* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked |
764 | * to avoid unsynchronized access |
765 | */ |
766 | static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, |
767 | struct qcm_process_device *qpd, |
768 | struct queue *q) |
769 | { |
770 | int retval; |
771 | struct mqd_manager *mqd_mgr; |
772 | |
773 | mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( |
774 | q->properties.type)]; |
775 | |
776 | if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) |
777 | deallocate_hqd(dqm, q); |
778 | else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) |
779 | deallocate_sdma_queue(dqm, q); |
780 | else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) |
781 | deallocate_sdma_queue(dqm, q); |
782 | else { |
783 | pr_debug("q->properties.type %d is invalid\n" , |
784 | q->properties.type); |
785 | return -EINVAL; |
786 | } |
787 | dqm->total_queue_count--; |
788 | |
789 | deallocate_doorbell(qpd, q); |
790 | |
791 | if (!dqm->sched_running) { |
792 | WARN_ONCE(1, "Destroy non-HWS queue while stopped\n" ); |
793 | return 0; |
794 | } |
795 | |
796 | retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, |
797 | KFD_PREEMPT_TYPE_WAVEFRONT_RESET, |
798 | KFD_UNMAP_LATENCY_MS, |
799 | q->pipe, q->queue); |
800 | if (retval == -ETIME) |
801 | qpd->reset_wavefronts = true; |
802 | |
803 | list_del(&q->list); |
804 | if (list_empty(&qpd->queues_list)) { |
805 | if (qpd->reset_wavefronts) { |
806 | pr_warn("Resetting wave fronts (nocpsch) on dev %p\n" , |
807 | dqm->dev); |
808 | /* dbgdev_wave_reset_wavefronts has to be called before |
809 | * deallocate_vmid(), i.e. when vmid is still in use. |
810 | */ |
811 | dbgdev_wave_reset_wavefronts(dqm->dev, |
812 | qpd->pqm->process); |
813 | qpd->reset_wavefronts = false; |
814 | } |
815 | |
816 | deallocate_vmid(dqm, qpd, q); |
817 | } |
818 | qpd->queue_count--; |
819 | if (q->properties.is_active) |
820 | decrement_queue_count(dqm, qpd, q); |
821 | |
822 | return retval; |
823 | } |
824 | |
825 | static int destroy_queue_nocpsch(struct device_queue_manager *dqm, |
826 | struct qcm_process_device *qpd, |
827 | struct queue *q) |
828 | { |
829 | int retval; |
830 | uint64_t sdma_val = 0; |
831 | struct device *dev = dqm->dev->adev->dev; |
832 | struct kfd_process_device *pdd = qpd_to_pdd(qpd); |
833 | struct mqd_manager *mqd_mgr = |
834 | dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)]; |
835 | |
836 | /* Get the SDMA queue stats */ |
837 | if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) || |
838 | (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { |
839 | retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr, |
840 | &sdma_val); |
841 | if (retval) |
842 | dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n" , |
843 | q->properties.queue_id); |
844 | } |
845 | |
846 | dqm_lock(dqm); |
847 | retval = destroy_queue_nocpsch_locked(dqm, qpd, q); |
848 | if (!retval) |
849 | pdd->sdma_past_activity_counter += sdma_val; |
850 | dqm_unlock(dqm); |
851 | |
852 | mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); |
853 | |
854 | return retval; |
855 | } |
856 | |
857 | static int update_queue(struct device_queue_manager *dqm, struct queue *q, |
858 | struct mqd_update_info *minfo) |
859 | { |
860 | int retval = 0; |
861 | struct device *dev = dqm->dev->adev->dev; |
862 | struct mqd_manager *mqd_mgr; |
863 | struct kfd_process_device *pdd; |
864 | bool prev_active = false; |
865 | |
866 | dqm_lock(dqm); |
867 | pdd = kfd_get_process_device_data(q->device, q->process); |
868 | if (!pdd) { |
869 | retval = -ENODEV; |
870 | goto out_unlock; |
871 | } |
872 | mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( |
873 | q->properties.type)]; |
874 | |
875 | /* Save previous activity state for counters */ |
876 | prev_active = q->properties.is_active; |
877 | |
878 | /* Make sure the queue is unmapped before updating the MQD */ |
879 | if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { |
880 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
881 | retval = unmap_queues_cpsch(dqm, |
882 | KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); |
883 | else if (prev_active) |
884 | retval = remove_queue_mes(dqm, q, &pdd->qpd); |
885 | |
886 | if (retval) { |
887 | dev_err(dev, "unmap queue failed\n" ); |
888 | goto out_unlock; |
889 | } |
890 | } else if (prev_active && |
891 | (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || |
892 | q->properties.type == KFD_QUEUE_TYPE_SDMA || |
893 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { |
894 | |
895 | if (!dqm->sched_running) { |
896 | WARN_ONCE(1, "Update non-HWS queue while stopped\n" ); |
897 | goto out_unlock; |
898 | } |
899 | |
900 | retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, |
901 | (dqm->dev->kfd->cwsr_enabled ? |
902 | KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : |
903 | KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), |
904 | KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); |
905 | if (retval) { |
906 | dev_err(dev, "destroy mqd failed\n" ); |
907 | goto out_unlock; |
908 | } |
909 | } |
910 | |
911 | mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo); |
912 | |
913 | /* |
914 | * check active state vs. the previous state and modify |
915 | * counter accordingly. map_queues_cpsch uses the |
916 | * dqm->active_queue_count to determine whether a new runlist must be |
917 | * uploaded. |
918 | */ |
919 | if (q->properties.is_active && !prev_active) { |
920 | increment_queue_count(dqm, &pdd->qpd, q); |
921 | } else if (!q->properties.is_active && prev_active) { |
922 | decrement_queue_count(dqm, &pdd->qpd, q); |
923 | } else if (q->gws && !q->properties.is_gws) { |
924 | if (q->properties.is_active) { |
925 | dqm->gws_queue_count++; |
926 | pdd->qpd.mapped_gws_queue = true; |
927 | } |
928 | q->properties.is_gws = true; |
929 | } else if (!q->gws && q->properties.is_gws) { |
930 | if (q->properties.is_active) { |
931 | dqm->gws_queue_count--; |
932 | pdd->qpd.mapped_gws_queue = false; |
933 | } |
934 | q->properties.is_gws = false; |
935 | } |
936 | |
937 | if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { |
938 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
939 | retval = map_queues_cpsch(dqm); |
940 | else if (q->properties.is_active) |
941 | retval = add_queue_mes(dqm, q, &pdd->qpd); |
942 | } else if (q->properties.is_active && |
943 | (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || |
944 | q->properties.type == KFD_QUEUE_TYPE_SDMA || |
945 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { |
946 | if (WARN(q->process->mm != current->mm, |
947 | "should only run in user thread" )) |
948 | retval = -EFAULT; |
949 | else |
950 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, |
951 | q->pipe, q->queue, |
952 | &q->properties, current->mm); |
953 | } |
954 | |
955 | out_unlock: |
956 | dqm_unlock(dqm); |
957 | return retval; |
958 | } |
959 | |
960 | /* suspend_single_queue does not lock the dqm like the |
961 | * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should |
962 | * lock the dqm before calling, and unlock after calling. |
963 | * |
964 | * The reason we don't lock the dqm is because this function may be |
965 | * called on multiple queues in a loop, so rather than locking/unlocking |
966 | * multiple times, we will just keep the dqm locked for all of the calls. |
967 | */ |
968 | static int suspend_single_queue(struct device_queue_manager *dqm, |
969 | struct kfd_process_device *pdd, |
970 | struct queue *q) |
971 | { |
972 | bool is_new; |
973 | |
974 | if (q->properties.is_suspended) |
975 | return 0; |
976 | |
977 | pr_debug("Suspending PASID %u queue [%i]\n" , |
978 | pdd->process->pasid, |
979 | q->properties.queue_id); |
980 | |
981 | is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW); |
982 | |
983 | if (is_new || q->properties.is_being_destroyed) { |
984 | pr_debug("Suspend: skip %s queue id %i\n" , |
985 | is_new ? "new" : "destroyed" , |
986 | q->properties.queue_id); |
987 | return -EBUSY; |
988 | } |
989 | |
990 | q->properties.is_suspended = true; |
991 | if (q->properties.is_active) { |
992 | if (dqm->dev->kfd->shared_resources.enable_mes) { |
993 | int r = remove_queue_mes(dqm, q, &pdd->qpd); |
994 | |
995 | if (r) |
996 | return r; |
997 | } |
998 | |
999 | decrement_queue_count(dqm, &pdd->qpd, q); |
1000 | q->properties.is_active = false; |
1001 | } |
1002 | |
1003 | return 0; |
1004 | } |
1005 | |
1006 | /* resume_single_queue does not lock the dqm like the functions |
1007 | * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should |
1008 | * lock the dqm before calling, and unlock after calling. |
1009 | * |
1010 | * The reason we don't lock the dqm is because this function may be |
1011 | * called on multiple queues in a loop, so rather than locking/unlocking |
1012 | * multiple times, we will just keep the dqm locked for all of the calls. |
1013 | */ |
1014 | static int resume_single_queue(struct device_queue_manager *dqm, |
1015 | struct qcm_process_device *qpd, |
1016 | struct queue *q) |
1017 | { |
1018 | struct kfd_process_device *pdd; |
1019 | |
1020 | if (!q->properties.is_suspended) |
1021 | return 0; |
1022 | |
1023 | pdd = qpd_to_pdd(qpd); |
1024 | |
1025 | pr_debug("Restoring from suspend PASID %u queue [%i]\n" , |
1026 | pdd->process->pasid, |
1027 | q->properties.queue_id); |
1028 | |
1029 | q->properties.is_suspended = false; |
1030 | |
1031 | if (QUEUE_IS_ACTIVE(q->properties)) { |
1032 | if (dqm->dev->kfd->shared_resources.enable_mes) { |
1033 | int r = add_queue_mes(dqm, q, &pdd->qpd); |
1034 | |
1035 | if (r) |
1036 | return r; |
1037 | } |
1038 | |
1039 | q->properties.is_active = true; |
1040 | increment_queue_count(dqm, qpd, q); |
1041 | } |
1042 | |
1043 | return 0; |
1044 | } |
1045 | |
1046 | static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, |
1047 | struct qcm_process_device *qpd) |
1048 | { |
1049 | struct queue *q; |
1050 | struct mqd_manager *mqd_mgr; |
1051 | struct kfd_process_device *pdd; |
1052 | int retval, ret = 0; |
1053 | |
1054 | dqm_lock(dqm); |
1055 | if (qpd->evicted++ > 0) /* already evicted, do nothing */ |
1056 | goto out; |
1057 | |
1058 | pdd = qpd_to_pdd(qpd); |
1059 | pr_debug_ratelimited("Evicting PASID 0x%x queues\n" , |
1060 | pdd->process->pasid); |
1061 | |
1062 | pdd->last_evict_timestamp = get_jiffies_64(); |
1063 | /* Mark all queues as evicted. Deactivate all active queues on |
1064 | * the qpd. |
1065 | */ |
1066 | list_for_each_entry(q, &qpd->queues_list, list) { |
1067 | q->properties.is_evicted = true; |
1068 | if (!q->properties.is_active) |
1069 | continue; |
1070 | |
1071 | mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( |
1072 | q->properties.type)]; |
1073 | q->properties.is_active = false; |
1074 | decrement_queue_count(dqm, qpd, q); |
1075 | |
1076 | if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n" )) |
1077 | continue; |
1078 | |
1079 | retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, |
1080 | (dqm->dev->kfd->cwsr_enabled ? |
1081 | KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : |
1082 | KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), |
1083 | KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); |
1084 | if (retval && !ret) |
1085 | /* Return the first error, but keep going to |
1086 | * maintain a consistent eviction state |
1087 | */ |
1088 | ret = retval; |
1089 | } |
1090 | |
1091 | out: |
1092 | dqm_unlock(dqm); |
1093 | return ret; |
1094 | } |
1095 | |
1096 | static int evict_process_queues_cpsch(struct device_queue_manager *dqm, |
1097 | struct qcm_process_device *qpd) |
1098 | { |
1099 | struct queue *q; |
1100 | struct device *dev = dqm->dev->adev->dev; |
1101 | struct kfd_process_device *pdd; |
1102 | int retval = 0; |
1103 | |
1104 | dqm_lock(dqm); |
1105 | if (qpd->evicted++ > 0) /* already evicted, do nothing */ |
1106 | goto out; |
1107 | |
1108 | pdd = qpd_to_pdd(qpd); |
1109 | |
1110 | /* The debugger creates processes that temporarily have not acquired |
1111 | * all VMs for all devices and has no VMs itself. |
1112 | * Skip queue eviction on process eviction. |
1113 | */ |
1114 | if (!pdd->drm_priv) |
1115 | goto out; |
1116 | |
1117 | pr_debug_ratelimited("Evicting PASID 0x%x queues\n" , |
1118 | pdd->process->pasid); |
1119 | |
1120 | /* Mark all queues as evicted. Deactivate all active queues on |
1121 | * the qpd. |
1122 | */ |
1123 | list_for_each_entry(q, &qpd->queues_list, list) { |
1124 | q->properties.is_evicted = true; |
1125 | if (!q->properties.is_active) |
1126 | continue; |
1127 | |
1128 | q->properties.is_active = false; |
1129 | decrement_queue_count(dqm, qpd, q); |
1130 | |
1131 | if (dqm->dev->kfd->shared_resources.enable_mes) { |
1132 | retval = remove_queue_mes(dqm, q, qpd); |
1133 | if (retval) { |
1134 | dev_err(dev, "Failed to evict queue %d\n" , |
1135 | q->properties.queue_id); |
1136 | goto out; |
1137 | } |
1138 | } |
1139 | } |
1140 | pdd->last_evict_timestamp = get_jiffies_64(); |
1141 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
1142 | retval = execute_queues_cpsch(dqm, |
1143 | qpd->is_debug ? |
1144 | KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : |
1145 | KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, |
1146 | USE_DEFAULT_GRACE_PERIOD); |
1147 | |
1148 | out: |
1149 | dqm_unlock(dqm); |
1150 | return retval; |
1151 | } |
1152 | |
1153 | static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, |
1154 | struct qcm_process_device *qpd) |
1155 | { |
1156 | struct mm_struct *mm = NULL; |
1157 | struct queue *q; |
1158 | struct mqd_manager *mqd_mgr; |
1159 | struct kfd_process_device *pdd; |
1160 | uint64_t pd_base; |
1161 | uint64_t eviction_duration; |
1162 | int retval, ret = 0; |
1163 | |
1164 | pdd = qpd_to_pdd(qpd); |
1165 | /* Retrieve PD base */ |
1166 | pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv); |
1167 | |
1168 | dqm_lock(dqm); |
1169 | if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ |
1170 | goto out; |
1171 | if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */ |
1172 | qpd->evicted--; |
1173 | goto out; |
1174 | } |
1175 | |
1176 | pr_debug_ratelimited("Restoring PASID 0x%x queues\n" , |
1177 | pdd->process->pasid); |
1178 | |
1179 | /* Update PD Base in QPD */ |
1180 | qpd->page_table_base = pd_base; |
1181 | pr_debug("Updated PD address to 0x%llx\n" , pd_base); |
1182 | |
1183 | if (!list_empty(&qpd->queues_list)) { |
1184 | dqm->dev->kfd2kgd->set_vm_context_page_table_base( |
1185 | dqm->dev->adev, |
1186 | qpd->vmid, |
1187 | qpd->page_table_base); |
1188 | kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY); |
1189 | } |
1190 | |
1191 | /* Take a safe reference to the mm_struct, which may otherwise |
1192 | * disappear even while the kfd_process is still referenced. |
1193 | */ |
1194 | mm = get_task_mm(pdd->process->lead_thread); |
1195 | if (!mm) { |
1196 | ret = -EFAULT; |
1197 | goto out; |
1198 | } |
1199 | |
1200 | /* Remove the eviction flags. Activate queues that are not |
1201 | * inactive for other reasons. |
1202 | */ |
1203 | list_for_each_entry(q, &qpd->queues_list, list) { |
1204 | q->properties.is_evicted = false; |
1205 | if (!QUEUE_IS_ACTIVE(q->properties)) |
1206 | continue; |
1207 | |
1208 | mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( |
1209 | q->properties.type)]; |
1210 | q->properties.is_active = true; |
1211 | increment_queue_count(dqm, qpd, q); |
1212 | |
1213 | if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n" )) |
1214 | continue; |
1215 | |
1216 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, |
1217 | q->queue, &q->properties, mm); |
1218 | if (retval && !ret) |
1219 | /* Return the first error, but keep going to |
1220 | * maintain a consistent eviction state |
1221 | */ |
1222 | ret = retval; |
1223 | } |
1224 | qpd->evicted = 0; |
1225 | eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp; |
1226 | atomic64_add(eviction_duration, &pdd->evict_duration_counter); |
1227 | out: |
1228 | if (mm) |
1229 | mmput(mm); |
1230 | dqm_unlock(dqm); |
1231 | return ret; |
1232 | } |
1233 | |
1234 | static int restore_process_queues_cpsch(struct device_queue_manager *dqm, |
1235 | struct qcm_process_device *qpd) |
1236 | { |
1237 | struct queue *q; |
1238 | struct device *dev = dqm->dev->adev->dev; |
1239 | struct kfd_process_device *pdd; |
1240 | uint64_t eviction_duration; |
1241 | int retval = 0; |
1242 | |
1243 | pdd = qpd_to_pdd(qpd); |
1244 | |
1245 | dqm_lock(dqm); |
1246 | if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ |
1247 | goto out; |
1248 | if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */ |
1249 | qpd->evicted--; |
1250 | goto out; |
1251 | } |
1252 | |
1253 | /* The debugger creates processes that temporarily have not acquired |
1254 | * all VMs for all devices and has no VMs itself. |
1255 | * Skip queue restore on process restore. |
1256 | */ |
1257 | if (!pdd->drm_priv) |
1258 | goto vm_not_acquired; |
1259 | |
1260 | pr_debug_ratelimited("Restoring PASID 0x%x queues\n" , |
1261 | pdd->process->pasid); |
1262 | |
1263 | /* Update PD Base in QPD */ |
1264 | qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv); |
1265 | pr_debug("Updated PD address to 0x%llx\n" , qpd->page_table_base); |
1266 | |
1267 | /* activate all active queues on the qpd */ |
1268 | list_for_each_entry(q, &qpd->queues_list, list) { |
1269 | q->properties.is_evicted = false; |
1270 | if (!QUEUE_IS_ACTIVE(q->properties)) |
1271 | continue; |
1272 | |
1273 | q->properties.is_active = true; |
1274 | increment_queue_count(dqm, &pdd->qpd, q); |
1275 | |
1276 | if (dqm->dev->kfd->shared_resources.enable_mes) { |
1277 | retval = add_queue_mes(dqm, q, qpd); |
1278 | if (retval) { |
1279 | dev_err(dev, "Failed to restore queue %d\n" , |
1280 | q->properties.queue_id); |
1281 | goto out; |
1282 | } |
1283 | } |
1284 | } |
1285 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
1286 | retval = execute_queues_cpsch(dqm, |
1287 | KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); |
1288 | eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp; |
1289 | atomic64_add(eviction_duration, &pdd->evict_duration_counter); |
1290 | vm_not_acquired: |
1291 | qpd->evicted = 0; |
1292 | out: |
1293 | dqm_unlock(dqm); |
1294 | return retval; |
1295 | } |
1296 | |
1297 | static int register_process(struct device_queue_manager *dqm, |
1298 | struct qcm_process_device *qpd) |
1299 | { |
1300 | struct device_process_node *n; |
1301 | struct kfd_process_device *pdd; |
1302 | uint64_t pd_base; |
1303 | int retval; |
1304 | |
1305 | n = kzalloc(sizeof(*n), GFP_KERNEL); |
1306 | if (!n) |
1307 | return -ENOMEM; |
1308 | |
1309 | n->qpd = qpd; |
1310 | |
1311 | pdd = qpd_to_pdd(qpd); |
1312 | /* Retrieve PD base */ |
1313 | pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv); |
1314 | |
1315 | dqm_lock(dqm); |
1316 | list_add(&n->list, &dqm->queues); |
1317 | |
1318 | /* Update PD Base in QPD */ |
1319 | qpd->page_table_base = pd_base; |
1320 | pr_debug("Updated PD address to 0x%llx\n" , pd_base); |
1321 | |
1322 | retval = dqm->asic_ops.update_qpd(dqm, qpd); |
1323 | |
1324 | dqm->processes_count++; |
1325 | |
1326 | dqm_unlock(dqm); |
1327 | |
1328 | /* Outside the DQM lock because under the DQM lock we can't do |
1329 | * reclaim or take other locks that others hold while reclaiming. |
1330 | */ |
1331 | kfd_inc_compute_active(dqm->dev); |
1332 | |
1333 | return retval; |
1334 | } |
1335 | |
1336 | static int unregister_process(struct device_queue_manager *dqm, |
1337 | struct qcm_process_device *qpd) |
1338 | { |
1339 | int retval; |
1340 | struct device_process_node *cur, *next; |
1341 | |
1342 | pr_debug("qpd->queues_list is %s\n" , |
1343 | list_empty(&qpd->queues_list) ? "empty" : "not empty" ); |
1344 | |
1345 | retval = 0; |
1346 | dqm_lock(dqm); |
1347 | |
1348 | list_for_each_entry_safe(cur, next, &dqm->queues, list) { |
1349 | if (qpd == cur->qpd) { |
1350 | list_del(&cur->list); |
1351 | kfree(cur); |
1352 | dqm->processes_count--; |
1353 | goto out; |
1354 | } |
1355 | } |
1356 | /* qpd not found in dqm list */ |
1357 | retval = 1; |
1358 | out: |
1359 | dqm_unlock(dqm); |
1360 | |
1361 | /* Outside the DQM lock because under the DQM lock we can't do |
1362 | * reclaim or take other locks that others hold while reclaiming. |
1363 | */ |
1364 | if (!retval) |
1365 | kfd_dec_compute_active(dqm->dev); |
1366 | |
1367 | return retval; |
1368 | } |
1369 | |
1370 | static int |
1371 | set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, |
1372 | unsigned int vmid) |
1373 | { |
1374 | uint32_t xcc_mask = dqm->dev->xcc_mask; |
1375 | int xcc_id, ret; |
1376 | |
1377 | for_each_inst(xcc_id, xcc_mask) { |
1378 | ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping( |
1379 | dqm->dev->adev, pasid, vmid, xcc_id); |
1380 | if (ret) |
1381 | break; |
1382 | } |
1383 | |
1384 | return ret; |
1385 | } |
1386 | |
1387 | static void init_interrupts(struct device_queue_manager *dqm) |
1388 | { |
1389 | uint32_t xcc_mask = dqm->dev->xcc_mask; |
1390 | unsigned int i, xcc_id; |
1391 | |
1392 | for_each_inst(xcc_id, xcc_mask) { |
1393 | for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { |
1394 | if (is_pipe_enabled(dqm, 0, i)) { |
1395 | dqm->dev->kfd2kgd->init_interrupts( |
1396 | dqm->dev->adev, i, xcc_id); |
1397 | } |
1398 | } |
1399 | } |
1400 | } |
1401 | |
1402 | static int initialize_nocpsch(struct device_queue_manager *dqm) |
1403 | { |
1404 | int pipe, queue; |
1405 | |
1406 | pr_debug("num of pipes: %d\n" , get_pipes_per_mec(dqm)); |
1407 | |
1408 | dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm), |
1409 | sizeof(unsigned int), GFP_KERNEL); |
1410 | if (!dqm->allocated_queues) |
1411 | return -ENOMEM; |
1412 | |
1413 | mutex_init(&dqm->lock_hidden); |
1414 | INIT_LIST_HEAD(&dqm->queues); |
1415 | dqm->active_queue_count = dqm->next_pipe_to_allocate = 0; |
1416 | dqm->active_cp_queue_count = 0; |
1417 | dqm->gws_queue_count = 0; |
1418 | |
1419 | for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { |
1420 | int pipe_offset = pipe * get_queues_per_pipe(dqm); |
1421 | |
1422 | for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) |
1423 | if (test_bit(pipe_offset + queue, |
1424 | dqm->dev->kfd->shared_resources.cp_queue_bitmap)) |
1425 | dqm->allocated_queues[pipe] |= 1 << queue; |
1426 | } |
1427 | |
1428 | memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid)); |
1429 | |
1430 | init_sdma_bitmaps(dqm); |
1431 | |
1432 | return 0; |
1433 | } |
1434 | |
1435 | static void uninitialize(struct device_queue_manager *dqm) |
1436 | { |
1437 | int i; |
1438 | |
1439 | WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0); |
1440 | |
1441 | kfree(dqm->allocated_queues); |
1442 | for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) |
1443 | kfree(dqm->mqd_mgrs[i]); |
1444 | mutex_destroy(&dqm->lock_hidden); |
1445 | } |
1446 | |
1447 | static int start_nocpsch(struct device_queue_manager *dqm) |
1448 | { |
1449 | int r = 0; |
1450 | |
1451 | pr_info("SW scheduler is used" ); |
1452 | init_interrupts(dqm); |
1453 | |
1454 | if (dqm->dev->adev->asic_type == CHIP_HAWAII) |
1455 | r = pm_init(pm: &dqm->packet_mgr, dqm); |
1456 | if (!r) |
1457 | dqm->sched_running = true; |
1458 | |
1459 | return r; |
1460 | } |
1461 | |
1462 | static int stop_nocpsch(struct device_queue_manager *dqm) |
1463 | { |
1464 | dqm_lock(dqm); |
1465 | if (!dqm->sched_running) { |
1466 | dqm_unlock(dqm); |
1467 | return 0; |
1468 | } |
1469 | |
1470 | if (dqm->dev->adev->asic_type == CHIP_HAWAII) |
1471 | pm_uninit(&dqm->packet_mgr, false); |
1472 | dqm->sched_running = false; |
1473 | dqm_unlock(dqm); |
1474 | |
1475 | return 0; |
1476 | } |
1477 | |
1478 | static void pre_reset(struct device_queue_manager *dqm) |
1479 | { |
1480 | dqm_lock(dqm); |
1481 | dqm->is_resetting = true; |
1482 | dqm_unlock(dqm); |
1483 | } |
1484 | |
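/* Reserve an SDMA queue: take a free bit from sdma_bitmap (PCIe-optimized
 * engines) or xgmi_sdma_bitmap (XGMI-optimized engines), optionally reusing
 * a restored id, then derive the engine and per-engine queue id from it.
 */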
1485 | static int allocate_sdma_queue(struct device_queue_manager *dqm, |
1486 | struct queue *q, const uint32_t *restore_sdma_id) |
1487 | { |
1488 | struct device *dev = dqm->dev->adev->dev; |
1489 | int bit; |
1490 | |
1491 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { |
1492 | if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { |
1493 | dev_err(dev, "No more SDMA queue to allocate\n"); |
1494 | return -ENOMEM; |
1495 | } |
1496 | |
1497 | if (restore_sdma_id) { |
1498 | /* Re-use existing sdma_id */ |
1499 | if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) { |
1500 | dev_err(dev, "SDMA queue already in use\n" ); |
1501 | return -EBUSY; |
1502 | } |
1503 | clear_bit(*restore_sdma_id, dqm->sdma_bitmap); |
1504 | q->sdma_id = *restore_sdma_id; |
1505 | } else { |
1506 | /* Find first available sdma_id */ |
1507 | bit = find_first_bit(dqm->sdma_bitmap, |
1508 | get_num_sdma_queues(dqm)); |
1509 | clear_bit(bit, dqm->sdma_bitmap); |
1510 | q->sdma_id = bit; |
1511 | } |
1512 | |
1513 | q->properties.sdma_engine_id = |
1514 | q->sdma_id % kfd_get_num_sdma_engines(dqm->dev); |
1515 | q->properties.sdma_queue_id = q->sdma_id / |
1516 | kfd_get_num_sdma_engines(dqm->dev); |
1517 | } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { |
1518 | if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { |
1519 | dev_err(dev, "No more XGMI SDMA queue to allocate\n"); |
1520 | return -ENOMEM; |
1521 | } |
1522 | if (restore_sdma_id) { |
1523 | /* Re-use existing sdma_id */ |
1524 | if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) { |
1525 | dev_err(dev, "SDMA queue already in use\n" ); |
1526 | return -EBUSY; |
1527 | } |
1528 | clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap); |
1529 | q->sdma_id = *restore_sdma_id; |
1530 | } else { |
1531 | bit = find_first_bit(dqm->xgmi_sdma_bitmap, |
1532 | get_num_xgmi_sdma_queues(dqm)); |
1533 | clear_bit(bit, dqm->xgmi_sdma_bitmap); |
1534 | q->sdma_id = bit; |
1535 | } |
1536 | /* sdma_engine_id is sdma id including |
1537 | * both PCIe-optimized SDMAs and XGMI- |
1538 | * optimized SDMAs. The calculation below |
1539 | * assumes the first N engines are always |
1540 | * PCIe-optimized ones |
1541 | */ |
1542 | q->properties.sdma_engine_id = |
1543 | kfd_get_num_sdma_engines(dqm->dev) + |
1544 | q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); |
1545 | q->properties.sdma_queue_id = q->sdma_id / |
1546 | kfd_get_num_xgmi_sdma_engines(dqm->dev); |
1547 | } |
1548 | |
1549 | pr_debug("SDMA engine id: %d\n" , q->properties.sdma_engine_id); |
1550 | pr_debug("SDMA queue id: %d\n" , q->properties.sdma_queue_id); |
1551 | |
1552 | return 0; |
1553 | } |
1554 | |
1555 | static void deallocate_sdma_queue(struct device_queue_manager *dqm, |
1556 | struct queue *q) |
1557 | { |
1558 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { |
1559 | if (q->sdma_id >= get_num_sdma_queues(dqm)) |
1560 | return; |
1561 | set_bit(q->sdma_id, dqm->sdma_bitmap); |
1562 | } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { |
1563 | if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm)) |
1564 | return; |
1565 | set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap); |
1566 | } |
1567 | } |
1568 | |
1569 | /* |
1570 | * Device Queue Manager implementation for cp scheduler |
1571 | */ |
1572 | |
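/* Report the VMIDs and CP queues that HWS may use. Only queues on the
 * first MEC are handed to the scheduler, and the 64-bit queue_mask limits
 * how many queue bits can be expressed.
 */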
1573 | static int set_sched_resources(struct device_queue_manager *dqm) |
1574 | { |
1575 | int i, mec; |
1576 | struct scheduling_resources res; |
1577 | struct device *dev = dqm->dev->adev->dev; |
1578 | |
1579 | res.vmid_mask = dqm->dev->compute_vmid_bitmap; |
1580 | |
1581 | res.queue_mask = 0; |
1582 | for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) { |
1583 | mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe) |
1584 | / dqm->dev->kfd->shared_resources.num_pipe_per_mec; |
1585 | |
1586 | if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap)) |
1587 | continue; |
1588 | |
1589 | /* only acquire queues from the first MEC */ |
1590 | if (mec > 0) |
1591 | continue; |
1592 | |
1593 | /* This situation may be hit in the future if a new HW |
1594 | * generation exposes more than 64 queues. If so, the |
1595 | * definition of res.queue_mask needs updating |
1596 | */ |
1597 | if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) { |
1598 | dev_err(dev, "Invalid queue enabled by amdgpu: %d\n" , i); |
1599 | break; |
1600 | } |
1601 | |
1602 | res.queue_mask |= 1ull |
1603 | << amdgpu_queue_mask_bit_to_set_resource_bit( |
1604 | dqm->dev->adev, i); |
1605 | } |
1606 | res.gws_mask = ~0ull; |
1607 | res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0; |
1608 | |
1609 | pr_debug("Scheduling resources:\n" |
1610 | "vmid mask: 0x%8X\n" |
1611 | "queue mask: 0x%8llX\n" , |
1612 | res.vmid_mask, res.queue_mask); |
1613 | |
1614 | return pm_send_set_resources(&dqm->packet_mgr, &res); |
1615 | } |
1616 | |
1617 | static int initialize_cpsch(struct device_queue_manager *dqm) |
1618 | { |
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
1623 | dqm->active_queue_count = dqm->processes_count = 0; |
1624 | dqm->active_cp_queue_count = 0; |
1625 | dqm->gws_queue_count = 0; |
1626 | dqm->active_runlist = false; |
1627 | INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception); |
1628 | dqm->trap_debug_vmid = 0; |
1629 | |
1630 | init_sdma_bitmaps(dqm); |
1631 | |
1632 | if (dqm->dev->kfd2kgd->get_iq_wait_times) |
1633 | dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev, |
1634 | &dqm->wait_times, |
1635 | ffs(dqm->dev->xcc_mask) - 1); |
1636 | return 0; |
1637 | } |
1638 | |
1639 | static int start_cpsch(struct device_queue_manager *dqm) |
1640 | { |
1641 | struct device *dev = dqm->dev->adev->dev; |
1642 | int retval; |
1643 | |
1644 | retval = 0; |
1645 | |
1646 | dqm_lock(dqm); |
1647 | |
1648 | if (!dqm->dev->kfd->shared_resources.enable_mes) { |
		retval = pm_init(&dqm->packet_mgr, dqm);
1650 | if (retval) |
1651 | goto fail_packet_manager_init; |
1652 | |
1653 | retval = set_sched_resources(dqm); |
1654 | if (retval) |
1655 | goto fail_set_sched_resources; |
1656 | } |
	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
				     &dqm->fence_mem);
1662 | |
1663 | if (retval) |
1664 | goto fail_allocate_vidmem; |
1665 | |
1666 | dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr; |
1667 | dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr; |
1668 | |
1669 | init_interrupts(dqm); |
1670 | |
	/* clear hang status when the driver tries to start the hw scheduler */
1672 | dqm->is_hws_hang = false; |
1673 | dqm->is_resetting = false; |
1674 | dqm->sched_running = true; |
1675 | |
1676 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
		execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
				     USE_DEFAULT_GRACE_PERIOD);
1678 | |
1679 | /* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */ |
1680 | if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu && |
1681 | (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) { |
1682 | uint32_t reg_offset = 0; |
1683 | uint32_t grace_period = 1; |
1684 | |
		retval = pm_update_grace_period(&dqm->packet_mgr,
						grace_period);
		if (retval)
			dev_err(dev, "Setting grace timeout failed\n");
1689 | else if (dqm->dev->kfd2kgd->build_grace_period_packet_info) |
1690 | /* Update dqm->wait_times maintained in software */ |
1691 | dqm->dev->kfd2kgd->build_grace_period_packet_info( |
1692 | dqm->dev->adev, dqm->wait_times, |
1693 | grace_period, ®_offset, |
1694 | &dqm->wait_times); |
1695 | } |
1696 | |
1697 | dqm_unlock(dqm); |
1698 | |
1699 | return 0; |
1700 | fail_allocate_vidmem: |
1701 | fail_set_sched_resources: |
1702 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
		pm_uninit(&dqm->packet_mgr, false);
1704 | fail_packet_manager_init: |
1705 | dqm_unlock(dqm); |
1706 | return retval; |
1707 | } |
1708 | |
1709 | static int stop_cpsch(struct device_queue_manager *dqm) |
1710 | { |
1711 | bool hanging; |
1712 | |
1713 | dqm_lock(dqm); |
1714 | if (!dqm->sched_running) { |
1715 | dqm_unlock(dqm); |
1716 | return 0; |
1717 | } |
1718 | |
1719 | if (!dqm->is_hws_hang) { |
1720 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
			unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
					   USE_DEFAULT_GRACE_PERIOD, false);
1722 | else |
1723 | remove_all_queues_mes(dqm); |
1724 | } |
1725 | |
1726 | hanging = dqm->is_hws_hang || dqm->is_resetting; |
1727 | dqm->sched_running = false; |
1728 | |
1729 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
		pm_release_ib(&dqm->packet_mgr);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		pm_uninit(&dqm->packet_mgr, hanging);
1735 | dqm_unlock(dqm); |
1736 | |
1737 | return 0; |
1738 | } |
1739 | |
1740 | static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, |
1741 | struct kernel_queue *kq, |
1742 | struct qcm_process_device *qpd) |
1743 | { |
1744 | dqm_lock(dqm); |
1745 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { |
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
1747 | dqm->total_queue_count); |
1748 | dqm_unlock(dqm); |
1749 | return -EPERM; |
1750 | } |
1751 | |
1752 | /* |
1753 | * Unconditionally increment this counter, regardless of the queue's |
1754 | * type or whether the queue is active. |
1755 | */ |
1756 | dqm->total_queue_count++; |
	pr_debug("Total of %d queues are accountable so far\n",
		 dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	increment_queue_count(dqm, qpd, kq->queue);
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
			     USE_DEFAULT_GRACE_PERIOD);
1765 | dqm_unlock(dqm); |
1766 | |
1767 | return 0; |
1768 | } |
1769 | |
1770 | static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, |
1771 | struct kernel_queue *kq, |
1772 | struct qcm_process_device *qpd) |
1773 | { |
1774 | dqm_lock(dqm); |
	list_del(&kq->list);
	decrement_queue_count(dqm, qpd, kq->queue);
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
			     USE_DEFAULT_GRACE_PERIOD);
1780 | /* |
1781 | * Unconditionally decrement this counter, regardless of the queue's |
1782 | * type. |
1783 | */ |
1784 | dqm->total_queue_count--; |
	pr_debug("Total of %d queues are accountable so far\n",
1786 | dqm->total_queue_count); |
1787 | dqm_unlock(dqm); |
1788 | } |
1789 | |
1790 | static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, |
1791 | struct qcm_process_device *qpd, |
1792 | const struct kfd_criu_queue_priv_data *qd, |
1793 | const void *restore_mqd, const void *restore_ctl_stack) |
1794 | { |
1795 | int retval; |
1796 | struct mqd_manager *mqd_mgr; |
1797 | |
1798 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { |
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
1800 | dqm->total_queue_count); |
1801 | retval = -EPERM; |
1802 | goto out; |
1803 | } |
1804 | |
1805 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA || |
1806 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { |
1807 | dqm_lock(dqm); |
		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
1809 | dqm_unlock(dqm); |
1810 | if (retval) |
1811 | goto out; |
1812 | } |
1813 | |
	retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
1815 | if (retval) |
1816 | goto out_deallocate_sdma_queue; |
1817 | |
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
1820 | |
1821 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA || |
1822 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) |
1823 | dqm->asic_ops.init_sdma_vm(dqm, q, qpd); |
1824 | q->properties.tba_addr = qpd->tba_addr; |
1825 | q->properties.tma_addr = qpd->tma_addr; |
1826 | q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties); |
1827 | if (!q->mqd_mem_obj) { |
1828 | retval = -ENOMEM; |
1829 | goto out_deallocate_doorbell; |
1830 | } |
1831 | |
1832 | dqm_lock(dqm); |
1833 | /* |
1834 | * Eviction state logic: mark all queues as evicted, even ones |
1835 | * not currently active. Restoring inactive queues later only |
1836 | * updates the is_evicted flag but is a no-op otherwise. |
1837 | */ |
1838 | q->properties.is_evicted = !!qpd->evicted; |
	q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
				  kfd_dbg_has_cwsr_workaround(q->device);
1841 | |
1842 | if (qd) |
1843 | mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, |
1844 | &q->properties, restore_mqd, restore_ctl_stack, |
1845 | qd->ctl_stack_size); |
1846 | else |
1847 | mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, |
1848 | &q->gart_mqd_addr, &q->properties); |
1849 | |
	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;

	if (q->properties.is_active) {
		increment_queue_count(dqm, qpd, q);

		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = execute_queues_cpsch(dqm,
					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
					USE_DEFAULT_GRACE_PERIOD);
1859 | else |
1860 | retval = add_queue_mes(dqm, q, qpd); |
1861 | if (retval) |
1862 | goto cleanup_queue; |
1863 | } |
1864 | |
1865 | /* |
1866 | * Unconditionally increment this counter, regardless of the queue's |
1867 | * type or whether the queue is active. |
1868 | */ |
1869 | dqm->total_queue_count++; |
1870 | |
	pr_debug("Total of %d queues are accountable so far\n",
		 dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

cleanup_queue:
	qpd->queue_count--;
	list_del(&q->list);
1880 | if (q->properties.is_active) |
1881 | decrement_queue_count(dqm, qpd, q); |
1882 | mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); |
1883 | dqm_unlock(dqm); |
1884 | out_deallocate_doorbell: |
1885 | deallocate_doorbell(qpd, q); |
1886 | out_deallocate_sdma_queue: |
1887 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA || |
1888 | q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { |
1889 | dqm_lock(dqm); |
1890 | deallocate_sdma_queue(dqm, q); |
1891 | dqm_unlock(dqm); |
1892 | } |
1893 | out: |
1894 | return retval; |
1895 | } |
1896 | |
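/*
 * Busy-wait (rescheduling between polls) until the scheduler firmware
 * writes fence_value to the GART fence location, or the timeout expires.
 * A timeout here usually means queue preemption did not complete in time.
 */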
1897 | int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm, |
1898 | uint64_t fence_value, |
1899 | unsigned int timeout_ms) |
1900 | { |
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1902 | struct device *dev = dqm->dev->adev->dev; |
1903 | uint64_t *fence_addr = dqm->fence_addr; |
1904 | |
1905 | while (*fence_addr != fence_value) { |
1906 | if (time_after(jiffies, end_jiffies)) { |
			dev_err(dev, "qcm fence wait loop timeout expired\n");
1908 | /* In HWS case, this is used to halt the driver thread |
1909 | * in order not to mess up CP states before doing |
1910 | * scandumps for FW debugging. |
1911 | */ |
1912 | while (halt_if_hws_hang) |
1913 | schedule(); |
1914 | |
1915 | return -ETIME; |
1916 | } |
1917 | schedule(); |
1918 | } |
1919 | |
1920 | return 0; |
1921 | } |
1922 | |
1923 | /* dqm->lock mutex has to be locked before calling this function */ |
1924 | static int map_queues_cpsch(struct device_queue_manager *dqm) |
1925 | { |
1926 | struct device *dev = dqm->dev->adev->dev; |
1927 | int retval; |
1928 | |
1929 | if (!dqm->sched_running) |
1930 | return 0; |
1931 | if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0) |
1932 | return 0; |
1933 | if (dqm->active_runlist) |
1934 | return 0; |
1935 | |
	retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
	pr_debug("%s sent runlist\n", __func__);
	if (retval) {
		dev_err(dev, "failed to execute runlist\n");
1940 | return retval; |
1941 | } |
1942 | dqm->active_runlist = true; |
1943 | |
1944 | return retval; |
1945 | } |
1946 | |
1947 | /* dqm->lock mutex has to be locked before calling this function */ |
1948 | static int unmap_queues_cpsch(struct device_queue_manager *dqm, |
1949 | enum kfd_unmap_queues_filter filter, |
1950 | uint32_t filter_param, |
1951 | uint32_t grace_period, |
1952 | bool reset) |
1953 | { |
1954 | struct device *dev = dqm->dev->adev->dev; |
1955 | struct mqd_manager *mqd_mgr; |
1956 | int retval = 0; |
1957 | |
1958 | if (!dqm->sched_running) |
1959 | return 0; |
1960 | if (dqm->is_hws_hang || dqm->is_resetting) |
1961 | return -EIO; |
1962 | if (!dqm->active_runlist) |
1963 | return retval; |
1964 | |
1965 | if (grace_period != USE_DEFAULT_GRACE_PERIOD) { |
		retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
1967 | if (retval) |
1968 | return retval; |
1969 | } |
1970 | |
	retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
1972 | if (retval) |
1973 | return retval; |
1974 | |
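	/*
	 * Fence handshake with the HWS: seed the GART fence with
	 * KFD_FENCE_INIT, then queue a query-status packet that writes
	 * KFD_FENCE_COMPLETED, which the firmware should only do after the
	 * preceding unmap packet has been processed; poll for that value
	 * below.
	 */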
1975 | *dqm->fence_addr = KFD_FENCE_INIT; |
	pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
			     KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm, KFD_FENCE_COMPLETED,
					   queue_preemption_timeout_ms);
	if (retval) {
		dev_err(dev, "The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1983 | kfd_hws_hang(dqm); |
1984 | return retval; |
1985 | } |
1986 | |
	/* In the current MEC firmware implementation, if a compute queue
	 * doesn't respond to the preemption request in time, the HIQ will
	 * abandon the unmap request without returning any timeout error
	 * to the driver. Instead, MEC firmware logs the doorbell of the
	 * unresponsive compute queue in the HIQ.MQD.queue_doorbell_id fields.
	 * To make sure the queue unmap was successful, the driver needs to
	 * check those fields.
	 */
1995 | mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; |
1996 | if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) { |
		dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
1998 | while (halt_if_hws_hang) |
1999 | schedule(); |
2000 | return -ETIME; |
2001 | } |
2002 | |
2003 | /* We need to reset the grace period value for this device */ |
2004 | if (grace_period != USE_DEFAULT_GRACE_PERIOD) { |
		if (pm_update_grace_period(&dqm->packet_mgr,
					   USE_DEFAULT_GRACE_PERIOD))
			dev_err(dev, "Failed to reset grace period\n");
2008 | } |
2009 | |
	pm_release_ib(&dqm->packet_mgr);
2011 | dqm->active_runlist = false; |
2012 | |
2013 | return retval; |
2014 | } |
2015 | |
2016 | /* only for compute queue */ |
2017 | static int reset_queues_cpsch(struct device_queue_manager *dqm, |
2018 | uint16_t pasid) |
2019 | { |
2020 | int retval; |
2021 | |
2022 | dqm_lock(dqm); |
2023 | |
	retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
				    pasid, USE_DEFAULT_GRACE_PERIOD, true);
2026 | |
2027 | dqm_unlock(dqm); |
2028 | return retval; |
2029 | } |
2030 | |
2031 | /* dqm->lock mutex has to be locked before calling this function */ |
2032 | static int execute_queues_cpsch(struct device_queue_manager *dqm, |
2033 | enum kfd_unmap_queues_filter filter, |
2034 | uint32_t filter_param, |
2035 | uint32_t grace_period) |
2036 | { |
2037 | int retval; |
2038 | |
2039 | if (dqm->is_hws_hang) |
2040 | return -EIO; |
	retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
2042 | if (retval) |
2043 | return retval; |
2044 | |
2045 | return map_queues_cpsch(dqm); |
2046 | } |
2047 | |
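/*
 * If the queue was suspended by the debugger, wait interruptibly (with the
 * dqm and process locks dropped) until it is resumed before letting the
 * destroy proceed. Processes that own a debug kernel queue skip the wait.
 */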
2048 | static int wait_on_destroy_queue(struct device_queue_manager *dqm, |
2049 | struct queue *q) |
2050 | { |
	struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
								      q->process);
2053 | int ret = 0; |
2054 | |
2055 | if (pdd->qpd.is_debug) |
2056 | return ret; |
2057 | |
2058 | q->properties.is_being_destroyed = true; |
2059 | |
2060 | if (pdd->process->debug_trap_enabled && q->properties.is_suspended) { |
2061 | dqm_unlock(dqm); |
		mutex_unlock(&q->process->mutex);
2063 | ret = wait_event_interruptible(dqm->destroy_wait, |
2064 | !q->properties.is_suspended); |
2065 | |
2066 | mutex_lock(&q->process->mutex); |
2067 | dqm_lock(dqm); |
2068 | } |
2069 | |
2070 | return ret; |
2071 | } |
2072 | |
2073 | static int destroy_queue_cpsch(struct device_queue_manager *dqm, |
2074 | struct qcm_process_device *qpd, |
2075 | struct queue *q) |
2076 | { |
2077 | int retval; |
2078 | struct mqd_manager *mqd_mgr; |
2079 | uint64_t sdma_val = 0; |
2080 | struct kfd_process_device *pdd = qpd_to_pdd(qpd); |
2081 | struct device *dev = dqm->dev->adev->dev; |
2082 | |
2083 | /* Get the SDMA queue stats */ |
2084 | if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) || |
2085 | (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { |
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
						 &sdma_val);
		if (retval)
			dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
2090 | q->properties.queue_id); |
2091 | } |
2092 | |
2093 | /* remove queue from list to prevent rescheduling after preemption */ |
2094 | dqm_lock(dqm); |
2095 | |
2096 | retval = wait_on_destroy_queue(dqm, q); |
2097 | |
2098 | if (retval) { |
2099 | dqm_unlock(dqm); |
2100 | return retval; |
2101 | } |
2102 | |
2103 | if (qpd->is_debug) { |
2104 | /* |
2105 | * error, currently we do not allow to destroy a queue |
2106 | * of a currently debugged process |
2107 | */ |
2108 | retval = -EBUSY; |
2109 | goto failed_try_destroy_debugged_queue; |
2110 | |
2111 | } |
2112 | |
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
2115 | |
2116 | deallocate_doorbell(qpd, q); |
2117 | |
2118 | if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) || |
2119 | (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { |
2120 | deallocate_sdma_queue(dqm, q); |
2121 | pdd->sdma_past_activity_counter += sdma_val; |
2122 | } |
2123 | |
	list_del(&q->list);
2125 | qpd->queue_count--; |
2126 | if (q->properties.is_active) { |
2127 | decrement_queue_count(dqm, qpd, q); |
2128 | if (!dqm->dev->kfd->shared_resources.enable_mes) { |
			retval = execute_queues_cpsch(dqm,
					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
					USE_DEFAULT_GRACE_PERIOD);
2132 | if (retval == -ETIME) |
2133 | qpd->reset_wavefronts = true; |
2134 | } else { |
2135 | retval = remove_queue_mes(dqm, q, qpd); |
2136 | } |
2137 | } |
2138 | |
2139 | /* |
2140 | * Unconditionally decrement this counter, regardless of the queue's |
2141 | * type |
2142 | */ |
2143 | dqm->total_queue_count--; |
	pr_debug("Total of %d queues are accountable so far\n",
2145 | dqm->total_queue_count); |
2146 | |
2147 | dqm_unlock(dqm); |
2148 | |
2149 | /* |
2150 | * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid |
2151 | * circular locking |
2152 | */ |
	kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE),
			 qpd->pqm->process, q->device,
			 -1, false, NULL, 0);
2156 | |
2157 | mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); |
2158 | |
2159 | return retval; |
2160 | |
2161 | failed_try_destroy_debugged_queue: |
2162 | |
2163 | dqm_unlock(dqm); |
2164 | return retval; |
2165 | } |
2166 | |
2167 | /* |
2168 | * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to |
2169 | * stay in user mode. |
2170 | */ |
2171 | #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL |
2172 | /* APE1 limit is inclusive and 64K aligned. */ |
2173 | #define APE1_LIMIT_ALIGNMENT 0xFFFF |
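/*
 * Example (not from the spec, just the arithmetic used below): a 64 KiB
 * aperture at GPUVA 0x1_0000_0000 gives base = 0x1_0000_0000 and
 * limit = 0x1_0000_FFFF, both of which satisfy APE1_FIXED_BITS_MASK /
 * APE1_LIMIT_ALIGNMENT and are programmed as sh_mem_ape1_base =
 * sh_mem_ape1_limit = 0x10000 after the >> 16 shift.
 */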
2174 | |
2175 | static bool set_cache_memory_policy(struct device_queue_manager *dqm, |
2176 | struct qcm_process_device *qpd, |
2177 | enum cache_policy default_policy, |
2178 | enum cache_policy alternate_policy, |
2179 | void __user *alternate_aperture_base, |
2180 | uint64_t alternate_aperture_size) |
2181 | { |
2182 | bool retval = true; |
2183 | |
2184 | if (!dqm->asic_ops.set_cache_memory_policy) |
2185 | return retval; |
2186 | |
2187 | dqm_lock(dqm); |
2188 | |
2189 | if (alternate_aperture_size == 0) { |
2190 | /* base > limit disables APE1 */ |
2191 | qpd->sh_mem_ape1_base = 1; |
2192 | qpd->sh_mem_ape1_limit = 0; |
2193 | } else { |
2194 | /* |
2195 | * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, |
2196 | * SH_MEM_APE1_BASE[31:0], 0x0000 } |
2197 | * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, |
2198 | * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } |
2199 | * Verify that the base and size parameters can be |
2200 | * represented in this format and convert them. |
2201 | * Additionally restrict APE1 to user-mode addresses. |
2202 | */ |
2203 | |
2204 | uint64_t base = (uintptr_t)alternate_aperture_base; |
2205 | uint64_t limit = base + alternate_aperture_size - 1; |
2206 | |
2207 | if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || |
2208 | (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { |
2209 | retval = false; |
2210 | goto out; |
2211 | } |
2212 | |
2213 | qpd->sh_mem_ape1_base = base >> 16; |
2214 | qpd->sh_mem_ape1_limit = limit >> 16; |
2215 | } |
2216 | |
2217 | retval = dqm->asic_ops.set_cache_memory_policy( |
2218 | dqm, |
2219 | qpd, |
2220 | default_policy, |
2221 | alternate_policy, |
2222 | alternate_aperture_base, |
2223 | alternate_aperture_size); |
2224 | |
2225 | if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) |
2226 | program_sh_mem_settings(dqm, qpd); |
2227 | |
	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
2229 | qpd->sh_mem_config, qpd->sh_mem_ape1_base, |
2230 | qpd->sh_mem_ape1_limit); |
2231 | |
2232 | out: |
2233 | dqm_unlock(dqm); |
2234 | return retval; |
2235 | } |
2236 | |
2237 | static int process_termination_nocpsch(struct device_queue_manager *dqm, |
2238 | struct qcm_process_device *qpd) |
2239 | { |
2240 | struct queue *q; |
2241 | struct device_process_node *cur, *next_dpn; |
2242 | int retval = 0; |
2243 | bool found = false; |
2244 | |
2245 | dqm_lock(dqm); |
2246 | |
2247 | /* Clear all user mode queues */ |
	while (!list_empty(&qpd->queues_list)) {
2249 | struct mqd_manager *mqd_mgr; |
2250 | int ret; |
2251 | |
2252 | q = list_first_entry(&qpd->queues_list, struct queue, list); |
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
2255 | ret = destroy_queue_nocpsch_locked(dqm, qpd, q); |
2256 | if (ret) |
2257 | retval = ret; |
2258 | dqm_unlock(dqm); |
2259 | mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); |
2260 | dqm_lock(dqm); |
2261 | } |
2262 | |
2263 | /* Unregister process */ |
2264 | list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { |
2265 | if (qpd == cur->qpd) { |
			list_del(&cur->list);
			kfree(cur);
2268 | dqm->processes_count--; |
2269 | found = true; |
2270 | break; |
2271 | } |
2272 | } |
2273 | |
2274 | dqm_unlock(dqm); |
2275 | |
2276 | /* Outside the DQM lock because under the DQM lock we can't do |
2277 | * reclaim or take other locks that others hold while reclaiming. |
2278 | */ |
2279 | if (found) |
		kfd_dec_compute_active(dqm->dev);
2281 | |
2282 | return retval; |
2283 | } |
2284 | |
2285 | static int get_wave_state(struct device_queue_manager *dqm, |
2286 | struct queue *q, |
2287 | void __user *ctl_stack, |
2288 | u32 *ctl_stack_used_size, |
2289 | u32 *save_area_used_size) |
2290 | { |
2291 | struct mqd_manager *mqd_mgr; |
2292 | |
2293 | dqm_lock(dqm); |
2294 | |
2295 | mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; |
2296 | |
2297 | if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || |
2298 | q->properties.is_active || !q->device->kfd->cwsr_enabled || |
2299 | !mqd_mgr->get_wave_state) { |
2300 | dqm_unlock(dqm); |
2301 | return -EINVAL; |
2302 | } |
2303 | |
2304 | dqm_unlock(dqm); |
2305 | |
2306 | /* |
2307 | * get_wave_state is outside the dqm lock to prevent circular locking |
2308 | * and the queue should be protected against destruction by the process |
2309 | * lock. |
2310 | */ |
2311 | return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties, |
2312 | ctl_stack, ctl_stack_used_size, save_area_used_size); |
2313 | } |
2314 | |
2315 | static void get_queue_checkpoint_info(struct device_queue_manager *dqm, |
2316 | const struct queue *q, |
2317 | u32 *mqd_size, |
2318 | u32 *ctl_stack_size) |
2319 | { |
2320 | struct mqd_manager *mqd_mgr; |
2321 | enum KFD_MQD_TYPE mqd_type = |
			get_mqd_type_from_queue_type(q->properties.type);
2323 | |
2324 | dqm_lock(dqm); |
2325 | mqd_mgr = dqm->mqd_mgrs[mqd_type]; |
2326 | *mqd_size = mqd_mgr->mqd_size; |
2327 | *ctl_stack_size = 0; |
2328 | |
2329 | if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info) |
2330 | mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size); |
2331 | |
2332 | dqm_unlock(dqm); |
2333 | } |
2334 | |
2335 | static int checkpoint_mqd(struct device_queue_manager *dqm, |
2336 | const struct queue *q, |
2337 | void *mqd, |
2338 | void *ctl_stack) |
2339 | { |
2340 | struct mqd_manager *mqd_mgr; |
2341 | int r = 0; |
	enum KFD_MQD_TYPE mqd_type =
			get_mqd_type_from_queue_type(q->properties.type);
2344 | |
2345 | dqm_lock(dqm); |
2346 | |
2347 | if (q->properties.is_active || !q->device->kfd->cwsr_enabled) { |
2348 | r = -EINVAL; |
2349 | goto dqm_unlock; |
2350 | } |
2351 | |
2352 | mqd_mgr = dqm->mqd_mgrs[mqd_type]; |
2353 | if (!mqd_mgr->checkpoint_mqd) { |
2354 | r = -EOPNOTSUPP; |
2355 | goto dqm_unlock; |
2356 | } |
2357 | |
2358 | mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack); |
2359 | |
2360 | dqm_unlock: |
2361 | dqm_unlock(dqm); |
2362 | return r; |
2363 | } |
2364 | |
2365 | static int process_termination_cpsch(struct device_queue_manager *dqm, |
2366 | struct qcm_process_device *qpd) |
2367 | { |
2368 | int retval; |
2369 | struct queue *q; |
2370 | struct device *dev = dqm->dev->adev->dev; |
2371 | struct kernel_queue *kq, *kq_next; |
2372 | struct mqd_manager *mqd_mgr; |
2373 | struct device_process_node *cur, *next_dpn; |
2374 | enum kfd_unmap_queues_filter filter = |
2375 | KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES; |
2376 | bool found = false; |
2377 | |
2378 | retval = 0; |
2379 | |
2380 | dqm_lock(dqm); |
2381 | |
2382 | /* Clean all kernel queues */ |
2383 | list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { |
		list_del(&kq->list);
		decrement_queue_count(dqm, qpd, kq->queue);
2386 | qpd->is_debug = false; |
2387 | dqm->total_queue_count--; |
2388 | filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; |
2389 | } |
2390 | |
2391 | /* Clear all user mode queues */ |
2392 | list_for_each_entry(q, &qpd->queues_list, list) { |
2393 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA) |
2394 | deallocate_sdma_queue(dqm, q); |
2395 | else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) |
2396 | deallocate_sdma_queue(dqm, q); |
2397 | |
2398 | if (q->properties.is_active) { |
2399 | decrement_queue_count(dqm, qpd, q); |
2400 | |
2401 | if (dqm->dev->kfd->shared_resources.enable_mes) { |
2402 | retval = remove_queue_mes(dqm, q, qpd); |
2403 | if (retval) |
					dev_err(dev, "Failed to remove queue %d\n",
2405 | q->properties.queue_id); |
2406 | } |
2407 | } |
2408 | |
2409 | dqm->total_queue_count--; |
2410 | } |
2411 | |
2412 | /* Unregister process */ |
2413 | list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { |
2414 | if (qpd == cur->qpd) { |
			list_del(&cur->list);
			kfree(cur);
2417 | dqm->processes_count--; |
2418 | found = true; |
2419 | break; |
2420 | } |
2421 | } |
2422 | |
2423 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
		retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
2425 | |
2426 | if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { |
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
2429 | qpd->reset_wavefronts = false; |
2430 | } |
2431 | |
2432 | /* Lastly, free mqd resources. |
2433 | * Do free_mqd() after dqm_unlock to avoid circular locking. |
2434 | */ |
	while (!list_empty(&qpd->queues_list)) {
		q = list_first_entry(&qpd->queues_list, struct queue, list);
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		list_del(&q->list);
2440 | qpd->queue_count--; |
2441 | dqm_unlock(dqm); |
2442 | mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); |
2443 | dqm_lock(dqm); |
2444 | } |
2445 | dqm_unlock(dqm); |
2446 | |
2447 | /* Outside the DQM lock because under the DQM lock we can't do |
2448 | * reclaim or take other locks that others hold while reclaiming. |
2449 | */ |
2450 | if (found) |
		kfd_dec_compute_active(dqm->dev);
2452 | |
2453 | return retval; |
2454 | } |
2455 | |
2456 | static int init_mqd_managers(struct device_queue_manager *dqm) |
2457 | { |
2458 | int i, j; |
2459 | struct device *dev = dqm->dev->adev->dev; |
2460 | struct mqd_manager *mqd_mgr; |
2461 | |
2462 | for (i = 0; i < KFD_MQD_TYPE_MAX; i++) { |
2463 | mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev); |
2464 | if (!mqd_mgr) { |
			dev_err(dev, "mqd manager [%d] initialization failed\n", i);
2466 | goto out_free; |
2467 | } |
2468 | dqm->mqd_mgrs[i] = mqd_mgr; |
2469 | } |
2470 | |
2471 | return 0; |
2472 | |
2473 | out_free: |
2474 | for (j = 0; j < i; j++) { |
		kfree(dqm->mqd_mgrs[j]);
2476 | dqm->mqd_mgrs[j] = NULL; |
2477 | } |
2478 | |
2479 | return -ENOMEM; |
2480 | } |
2481 | |
/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
2483 | static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) |
2484 | { |
2485 | int retval; |
2486 | struct kfd_node *dev = dqm->dev; |
2487 | struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; |
2488 | uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * |
2489 | get_num_all_sdma_engines(dqm) * |
2490 | dev->kfd->device_info.num_sdma_queues_per_engine + |
2491 | (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * |
2492 | NUM_XCC(dqm->dev->xcc_mask)); |
2493 | |
	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
			&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
			(void *)&(mem_obj->cpu_ptr), false);
2497 | |
2498 | return retval; |
2499 | } |
2500 | |
2501 | struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) |
2502 | { |
2503 | struct device_queue_manager *dqm; |
2504 | |
	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
2508 | if (!dqm) |
2509 | return NULL; |
2510 | |
2511 | switch (dev->adev->asic_type) { |
2512 | /* HWS is not available on Hawaii. */ |
2513 | case CHIP_HAWAII: |
2514 | /* HWS depends on CWSR for timely dequeue. CWSR is not |
2515 | * available on Tonga. |
2516 | * |
2517 | * FIXME: This argument also applies to Kaveri. |
2518 | */ |
2519 | case CHIP_TONGA: |
2520 | dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS; |
2521 | break; |
2522 | default: |
2523 | dqm->sched_policy = sched_policy; |
2524 | break; |
2525 | } |
2526 | |
2527 | dqm->dev = dev; |
2528 | switch (dqm->sched_policy) { |
2529 | case KFD_SCHED_POLICY_HWS: |
2530 | case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: |
2531 | /* initialize dqm for cp scheduling */ |
2532 | dqm->ops.create_queue = create_queue_cpsch; |
2533 | dqm->ops.initialize = initialize_cpsch; |
2534 | dqm->ops.start = start_cpsch; |
2535 | dqm->ops.stop = stop_cpsch; |
2536 | dqm->ops.pre_reset = pre_reset; |
2537 | dqm->ops.destroy_queue = destroy_queue_cpsch; |
2538 | dqm->ops.update_queue = update_queue; |
2539 | dqm->ops.register_process = register_process; |
2540 | dqm->ops.unregister_process = unregister_process; |
2541 | dqm->ops.uninitialize = uninitialize; |
2542 | dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; |
2543 | dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; |
2544 | dqm->ops.set_cache_memory_policy = set_cache_memory_policy; |
2545 | dqm->ops.process_termination = process_termination_cpsch; |
2546 | dqm->ops.evict_process_queues = evict_process_queues_cpsch; |
2547 | dqm->ops.restore_process_queues = restore_process_queues_cpsch; |
2548 | dqm->ops.get_wave_state = get_wave_state; |
2549 | dqm->ops.reset_queues = reset_queues_cpsch; |
2550 | dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info; |
2551 | dqm->ops.checkpoint_mqd = checkpoint_mqd; |
2552 | break; |
2553 | case KFD_SCHED_POLICY_NO_HWS: |
2554 | /* initialize dqm for no cp scheduling */ |
2555 | dqm->ops.start = start_nocpsch; |
2556 | dqm->ops.stop = stop_nocpsch; |
2557 | dqm->ops.pre_reset = pre_reset; |
2558 | dqm->ops.create_queue = create_queue_nocpsch; |
2559 | dqm->ops.destroy_queue = destroy_queue_nocpsch; |
2560 | dqm->ops.update_queue = update_queue; |
2561 | dqm->ops.register_process = register_process; |
2562 | dqm->ops.unregister_process = unregister_process; |
2563 | dqm->ops.initialize = initialize_nocpsch; |
2564 | dqm->ops.uninitialize = uninitialize; |
2565 | dqm->ops.set_cache_memory_policy = set_cache_memory_policy; |
2566 | dqm->ops.process_termination = process_termination_nocpsch; |
2567 | dqm->ops.evict_process_queues = evict_process_queues_nocpsch; |
2568 | dqm->ops.restore_process_queues = |
2569 | restore_process_queues_nocpsch; |
2570 | dqm->ops.get_wave_state = get_wave_state; |
2571 | dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info; |
2572 | dqm->ops.checkpoint_mqd = checkpoint_mqd; |
2573 | break; |
2574 | default: |
		dev_err(dev->adev->dev, "Invalid scheduling policy %d\n", dqm->sched_policy);
2576 | goto out_free; |
2577 | } |
2578 | |
2579 | switch (dev->adev->asic_type) { |
2580 | case CHIP_KAVERI: |
2581 | case CHIP_HAWAII: |
		device_queue_manager_init_cik(&dqm->asic_ops);
2583 | break; |
2584 | |
2585 | case CHIP_CARRIZO: |
2586 | case CHIP_TONGA: |
2587 | case CHIP_FIJI: |
2588 | case CHIP_POLARIS10: |
2589 | case CHIP_POLARIS11: |
2590 | case CHIP_POLARIS12: |
2591 | case CHIP_VEGAM: |
		device_queue_manager_init_vi(&dqm->asic_ops);
2593 | break; |
2594 | |
2595 | default: |
		if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
			device_queue_manager_init_v11(&dqm->asic_ops);
		else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			device_queue_manager_init_v10(&dqm->asic_ops);
		else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
			device_queue_manager_init_v9(&dqm->asic_ops);
		else {
			WARN(1, "Unexpected ASIC family %u",
			     dev->adev->asic_type);
2605 | goto out_free; |
2606 | } |
2607 | } |
2608 | |
2609 | if (init_mqd_managers(dqm)) |
2610 | goto out_free; |
2611 | |
2612 | if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { |
		dev_err(dev->adev->dev, "Failed to allocate hiq sdma mqd trunk buffer\n");
2614 | goto out_free; |
2615 | } |
2616 | |
2617 | if (!dqm->ops.initialize(dqm)) { |
2618 | init_waitqueue_head(&dqm->destroy_wait); |
2619 | return dqm; |
2620 | } |
2621 | |
2622 | out_free: |
	kfree(dqm);
2624 | return NULL; |
2625 | } |
2626 | |
2627 | static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, |
2628 | struct kfd_mem_obj *mqd) |
2629 | { |
	WARN(!mqd, "No hiq sdma mqd trunk to free");

	amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
2633 | } |
2634 | |
2635 | void device_queue_manager_uninit(struct device_queue_manager *dqm) |
2636 | { |
2637 | dqm->ops.stop(dqm); |
2638 | dqm->ops.uninitialize(dqm); |
2639 | if (!dqm->dev->kfd->shared_resources.enable_mes) |
		deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
	kfree(dqm);
2642 | } |
2643 | |
2644 | int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid) |
2645 | { |
2646 | struct kfd_process_device *pdd; |
2647 | struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); |
2648 | int ret = 0; |
2649 | |
2650 | if (!p) |
2651 | return -EINVAL; |
	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	pdd = kfd_get_process_device_data(dqm->dev, p);
2654 | if (pdd) |
2655 | ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); |
2656 | kfd_unref_process(p); |
2657 | |
2658 | return ret; |
2659 | } |
2660 | |
2661 | static void kfd_process_hw_exception(struct work_struct *work) |
2662 | { |
2663 | struct device_queue_manager *dqm = container_of(work, |
2664 | struct device_queue_manager, hw_exception_work); |
	amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
2666 | } |
2667 | |
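/*
 * Reserve the last KFD VMID for the trap debugger: drain the runlist,
 * drop that VMID from the compute VMID mask handed to the HWS, and
 * re-map the remaining queues so the firmware should no longer hand
 * that VMID out to regular compute work.
 */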
2668 | int reserve_debug_trap_vmid(struct device_queue_manager *dqm, |
2669 | struct qcm_process_device *qpd) |
2670 | { |
2671 | int r; |
2672 | struct device *dev = dqm->dev->adev->dev; |
2673 | int updated_vmid_mask; |
2674 | |
2675 | if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { |
		dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
2677 | return -EINVAL; |
2678 | } |
2679 | |
2680 | dqm_lock(dqm); |
2681 | |
2682 | if (dqm->trap_debug_vmid != 0) { |
		dev_err(dev, "Trap debug id already reserved\n");
2684 | r = -EBUSY; |
2685 | goto out_unlock; |
2686 | } |
2687 | |
	r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
			       USE_DEFAULT_GRACE_PERIOD, false);
2690 | if (r) |
2691 | goto out_unlock; |
2692 | |
2693 | updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; |
2694 | updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd); |
2695 | |
2696 | dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; |
2697 | dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd; |
2698 | r = set_sched_resources(dqm); |
2699 | if (r) |
2700 | goto out_unlock; |
2701 | |
2702 | r = map_queues_cpsch(dqm); |
2703 | if (r) |
2704 | goto out_unlock; |
2705 | |
	pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
2707 | |
2708 | out_unlock: |
2709 | dqm_unlock(dqm); |
2710 | return r; |
2711 | } |
2712 | |
2713 | /* |
2714 | * Releases vmid for the trap debugger |
2715 | */ |
2716 | int release_debug_trap_vmid(struct device_queue_manager *dqm, |
2717 | struct qcm_process_device *qpd) |
2718 | { |
2719 | struct device *dev = dqm->dev->adev->dev; |
2720 | int r; |
2721 | int updated_vmid_mask; |
2722 | uint32_t trap_debug_vmid; |
2723 | |
2724 | if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { |
		dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
2726 | return -EINVAL; |
2727 | } |
2728 | |
2729 | dqm_lock(dqm); |
2730 | trap_debug_vmid = dqm->trap_debug_vmid; |
2731 | if (dqm->trap_debug_vmid == 0) { |
		dev_err(dev, "Trap debug id is not reserved\n");
2733 | r = -EINVAL; |
2734 | goto out_unlock; |
2735 | } |
2736 | |
	r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
			       USE_DEFAULT_GRACE_PERIOD, false);
2739 | if (r) |
2740 | goto out_unlock; |
2741 | |
2742 | updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; |
2743 | updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd); |
2744 | |
2745 | dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; |
2746 | dqm->trap_debug_vmid = 0; |
2747 | r = set_sched_resources(dqm); |
2748 | if (r) |
2749 | goto out_unlock; |
2750 | |
2751 | r = map_queues_cpsch(dqm); |
2752 | if (r) |
2753 | goto out_unlock; |
2754 | |
	pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
2756 | |
2757 | out_unlock: |
2758 | dqm_unlock(dqm); |
2759 | return r; |
2760 | } |
2761 | |
2762 | #define QUEUE_NOT_FOUND -1 |
2763 | /* invalidate queue operation in array */ |
2764 | static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids) |
2765 | { |
2766 | int i; |
2767 | |
2768 | for (i = 0; i < num_queues; i++) |
2769 | queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK; |
2770 | } |
2771 | |
2772 | /* find queue index in array */ |
2773 | static int q_array_get_index(unsigned int queue_id, |
2774 | uint32_t num_queues, |
2775 | uint32_t *queue_ids) |
2776 | { |
2777 | int i; |
2778 | |
2779 | for (i = 0; i < num_queues; i++) |
2780 | if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK)) |
2781 | return i; |
2782 | |
2783 | return QUEUE_NOT_FOUND; |
2784 | } |
2785 | |
2786 | struct copy_context_work_handler_workarea { |
2787 | struct work_struct copy_context_work; |
2788 | struct kfd_process *p; |
2789 | }; |
2790 | |
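/*
 * Runs from a worker so that kthread_use_mm() can temporarily adopt the
 * target process' address space; get_wave_state() then writes the saved
 * wave context into the user-mode ctx_save_restore area of every queue.
 */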
2791 | static void copy_context_work_handler (struct work_struct *work) |
2792 | { |
2793 | struct copy_context_work_handler_workarea *workarea; |
2794 | struct mqd_manager *mqd_mgr; |
2795 | struct queue *q; |
2796 | struct mm_struct *mm; |
2797 | struct kfd_process *p; |
2798 | uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size; |
2799 | int i; |
2800 | |
2801 | workarea = container_of(work, |
2802 | struct copy_context_work_handler_workarea, |
2803 | copy_context_work); |
2804 | |
2805 | p = workarea->p; |
	mm = get_task_mm(p->lead_thread);
2807 | |
2808 | if (!mm) |
2809 | return; |
2810 | |
2811 | kthread_use_mm(mm); |
2812 | for (i = 0; i < p->n_pdds; i++) { |
2813 | struct kfd_process_device *pdd = p->pdds[i]; |
2814 | struct device_queue_manager *dqm = pdd->dev->dqm; |
2815 | struct qcm_process_device *qpd = &pdd->qpd; |
2816 | |
2817 | list_for_each_entry(q, &qpd->queues_list, list) { |
2818 | mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; |
2819 | |
2820 | /* We ignore the return value from get_wave_state |
2821 | * because |
2822 | * i) right now, it always returns 0, and |
2823 | * ii) if we hit an error, we would continue to the |
2824 | * next queue anyway. |
2825 | */ |
2826 | mqd_mgr->get_wave_state(mqd_mgr, |
2827 | q->mqd, |
2828 | &q->properties, |
2829 | (void __user *) q->properties.ctx_save_restore_area_address, |
2830 | &tmp_ctl_stack_used_size, |
2831 | &tmp_save_area_used_size); |
2832 | } |
2833 | } |
2834 | kthread_unuse_mm(mm); |
2835 | mmput(mm); |
2836 | } |
2837 | |
2838 | static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array) |
2839 | { |
2840 | size_t array_size = num_queues * sizeof(uint32_t); |
2841 | |
2842 | if (!usr_queue_id_array) |
2843 | return NULL; |
2844 | |
2845 | return memdup_user(usr_queue_id_array, array_size); |
2846 | } |
2847 | |
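/*
 * Resume previously suspended queues. If usr_queue_id_array is provided,
 * only the listed queues are resumed and per-queue status is reported back
 * through the same array (KFD_DBG_QUEUE_INVALID/ERROR_MASK bits); with a
 * NULL array every queue of the process is resumed. Returns the number of
 * queues resumed.
 */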
2848 | int resume_queues(struct kfd_process *p, |
2849 | uint32_t num_queues, |
2850 | uint32_t *usr_queue_id_array) |
2851 | { |
2852 | uint32_t *queue_ids = NULL; |
2853 | int total_resumed = 0; |
2854 | int i; |
2855 | |
2856 | if (usr_queue_id_array) { |
2857 | queue_ids = get_queue_ids(num_queues, usr_queue_id_array); |
2858 | |
		if (IS_ERR(queue_ids))
			return PTR_ERR(queue_ids);
2861 | |
2862 | /* mask all queues as invalid. unmask per successful request */ |
2863 | q_array_invalidate(num_queues, queue_ids); |
2864 | } |
2865 | |
2866 | for (i = 0; i < p->n_pdds; i++) { |
2867 | struct kfd_process_device *pdd = p->pdds[i]; |
2868 | struct device_queue_manager *dqm = pdd->dev->dqm; |
2869 | struct device *dev = dqm->dev->adev->dev; |
2870 | struct qcm_process_device *qpd = &pdd->qpd; |
2871 | struct queue *q; |
2872 | int r, per_device_resumed = 0; |
2873 | |
2874 | dqm_lock(dqm); |
2875 | |
2876 | /* unmask queues that resume or already resumed as valid */ |
2877 | list_for_each_entry(q, &qpd->queues_list, list) { |
2878 | int q_idx = QUEUE_NOT_FOUND; |
2879 | |
2880 | if (queue_ids) |
				q_idx = q_array_get_index(
						q->properties.queue_id,
						num_queues,
						queue_ids);

			if (!queue_ids || q_idx != QUEUE_NOT_FOUND) {
				int err = resume_single_queue(dqm, &pdd->qpd, q);
2888 | |
2889 | if (queue_ids) { |
2890 | if (!err) { |
2891 | queue_ids[q_idx] &= |
2892 | ~KFD_DBG_QUEUE_INVALID_MASK; |
2893 | } else { |
2894 | queue_ids[q_idx] |= |
2895 | KFD_DBG_QUEUE_ERROR_MASK; |
2896 | break; |
2897 | } |
2898 | } |
2899 | |
2900 | if (dqm->dev->kfd->shared_resources.enable_mes) { |
2901 | wake_up_all(&dqm->destroy_wait); |
2902 | if (!err) |
2903 | total_resumed++; |
2904 | } else { |
2905 | per_device_resumed++; |
2906 | } |
2907 | } |
2908 | } |
2909 | |
2910 | if (!per_device_resumed) { |
2911 | dqm_unlock(dqm); |
2912 | continue; |
2913 | } |
2914 | |
		r = execute_queues_cpsch(dqm,
					 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
					 0,
					 USE_DEFAULT_GRACE_PERIOD);
		if (r) {
			dev_err(dev, "Failed to resume process queues\n");
2921 | if (queue_ids) { |
2922 | list_for_each_entry(q, &qpd->queues_list, list) { |
					int q_idx = q_array_get_index(
							q->properties.queue_id,
							num_queues,
							queue_ids);
2927 | |
2928 | /* mask queue as error on resume fail */ |
2929 | if (q_idx != QUEUE_NOT_FOUND) |
2930 | queue_ids[q_idx] |= |
2931 | KFD_DBG_QUEUE_ERROR_MASK; |
2932 | } |
2933 | } |
2934 | } else { |
2935 | wake_up_all(&dqm->destroy_wait); |
2936 | total_resumed += per_device_resumed; |
2937 | } |
2938 | |
2939 | dqm_unlock(dqm); |
2940 | } |
2941 | |
2942 | if (queue_ids) { |
		if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
				 num_queues * sizeof(uint32_t)))
			pr_err("copy_to_user failed on queue resume\n");

		kfree(queue_ids);
2948 | } |
2949 | |
2950 | return total_resumed; |
2951 | } |
2952 | |
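/*
 * Suspend (preempt and keep unmapped) the listed queues of a process for
 * the debugger. Per-queue success/failure is reported back through
 * usr_queue_id_array, optionally clearing exception status bits, and the
 * saved wave context is copied out via copy_context_work_handler() once at
 * least one queue has been suspended. Returns the number of queues
 * suspended.
 */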
2953 | int suspend_queues(struct kfd_process *p, |
2954 | uint32_t num_queues, |
2955 | uint32_t grace_period, |
2956 | uint64_t exception_clear_mask, |
2957 | uint32_t *usr_queue_id_array) |
2958 | { |
2959 | uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array); |
2960 | int total_suspended = 0; |
2961 | int i; |
2962 | |
	if (IS_ERR(queue_ids))
		return PTR_ERR(queue_ids);

	/* mask all queues as invalid. unmask on successful request */
	q_array_invalidate(num_queues, queue_ids);
2968 | |
2969 | for (i = 0; i < p->n_pdds; i++) { |
2970 | struct kfd_process_device *pdd = p->pdds[i]; |
2971 | struct device_queue_manager *dqm = pdd->dev->dqm; |
2972 | struct device *dev = dqm->dev->adev->dev; |
2973 | struct qcm_process_device *qpd = &pdd->qpd; |
2974 | struct queue *q; |
2975 | int r, per_device_suspended = 0; |
2976 | |
2977 | mutex_lock(&p->event_mutex); |
2978 | dqm_lock(dqm); |
2979 | |
2980 | /* unmask queues that suspend or already suspended */ |
2981 | list_for_each_entry(q, &qpd->queues_list, list) { |
			int q_idx = q_array_get_index(q->properties.queue_id,
						      num_queues,
						      queue_ids);
2985 | |
2986 | if (q_idx != QUEUE_NOT_FOUND) { |
2987 | int err = suspend_single_queue(dqm, pdd, q); |
2988 | bool is_mes = dqm->dev->kfd->shared_resources.enable_mes; |
2989 | |
2990 | if (!err) { |
2991 | queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK; |
2992 | if (exception_clear_mask && is_mes) |
2993 | q->properties.exception_status &= |
2994 | ~exception_clear_mask; |
2995 | |
2996 | if (is_mes) |
2997 | total_suspended++; |
2998 | else |
2999 | per_device_suspended++; |
3000 | } else if (err != -EBUSY) { |
3001 | r = err; |
3002 | queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; |
3003 | break; |
3004 | } |
3005 | } |
3006 | } |
3007 | |
3008 | if (!per_device_suspended) { |
3009 | dqm_unlock(dqm); |
			mutex_unlock(&p->event_mutex);
			if (total_suspended)
				amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev);
3013 | continue; |
3014 | } |
3015 | |
		r = execute_queues_cpsch(dqm,
					 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
					 grace_period);

		if (r)
			dev_err(dev, "Failed to suspend process queues.\n");
3022 | else |
3023 | total_suspended += per_device_suspended; |
3024 | |
3025 | list_for_each_entry(q, &qpd->queues_list, list) { |
			int q_idx = q_array_get_index(q->properties.queue_id,
						      num_queues, queue_ids);
3028 | |
3029 | if (q_idx == QUEUE_NOT_FOUND) |
3030 | continue; |
3031 | |
3032 | /* mask queue as error on suspend fail */ |
3033 | if (r) |
3034 | queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; |
3035 | else if (exception_clear_mask) |
3036 | q->properties.exception_status &= |
3037 | ~exception_clear_mask; |
3038 | } |
3039 | |
3040 | dqm_unlock(dqm); |
		mutex_unlock(&p->event_mutex);
		amdgpu_device_flush_hdp(dqm->dev->adev, NULL);
3043 | } |
3044 | |
3045 | if (total_suspended) { |
3046 | struct copy_context_work_handler_workarea copy_context_worker; |
3047 | |
3048 | INIT_WORK_ONSTACK( |
3049 | ©_context_worker.copy_context_work, |
3050 | copy_context_work_handler); |
3051 | |
3052 | copy_context_worker.p = p; |
3053 | |
		schedule_work(&copy_context_worker.copy_context_work);

		flush_work(&copy_context_worker.copy_context_work);
		destroy_work_on_stack(&copy_context_worker.copy_context_work);
3059 | } |
3060 | |
	if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
			 num_queues * sizeof(uint32_t)))
		pr_err("copy_to_user failed on queue suspend\n");

	kfree(queue_ids);
3066 | |
3067 | return total_suspended; |
3068 | } |
3069 | |
3070 | static uint32_t set_queue_type_for_user(struct queue_properties *q_props) |
3071 | { |
3072 | switch (q_props->type) { |
3073 | case KFD_QUEUE_TYPE_COMPUTE: |
3074 | return q_props->format == KFD_QUEUE_FORMAT_PM4 |
3075 | ? KFD_IOC_QUEUE_TYPE_COMPUTE |
3076 | : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL; |
3077 | case KFD_QUEUE_TYPE_SDMA: |
3078 | return KFD_IOC_QUEUE_TYPE_SDMA; |
3079 | case KFD_QUEUE_TYPE_SDMA_XGMI: |
3080 | return KFD_IOC_QUEUE_TYPE_SDMA_XGMI; |
3081 | default: |
		WARN_ONCE(true, "queue type not recognized!");
3083 | return 0xffffffff; |
3084 | }; |
3085 | } |
3086 | |
3087 | void set_queue_snapshot_entry(struct queue *q, |
3088 | uint64_t exception_clear_mask, |
3089 | struct kfd_queue_snapshot_entry *qss_entry) |
3090 | { |
3091 | qss_entry->ring_base_address = q->properties.queue_address; |
3092 | qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr; |
3093 | qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr; |
3094 | qss_entry->ctx_save_restore_address = |
3095 | q->properties.ctx_save_restore_area_address; |
3096 | qss_entry->ctx_save_restore_area_size = |
3097 | q->properties.ctx_save_restore_area_size; |
3098 | qss_entry->exception_status = q->properties.exception_status; |
3099 | qss_entry->queue_id = q->properties.queue_id; |
3100 | qss_entry->gpu_id = q->device->id; |
3101 | qss_entry->ring_size = (uint32_t)q->properties.queue_size; |
3102 | qss_entry->queue_type = set_queue_type_for_user(&q->properties); |
3103 | q->properties.exception_status &= ~exception_clear_mask; |
3104 | } |
3105 | |
3106 | int debug_lock_and_unmap(struct device_queue_manager *dqm) |
3107 | { |
3108 | struct device *dev = dqm->dev->adev->dev; |
3109 | int r; |
3110 | |
3111 | if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { |
		dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
		return -EINVAL;
	}

	if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
		return 0;

	dqm_lock(dqm);

	r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
3122 | if (r) |
3123 | dqm_unlock(dqm); |
3124 | |
3125 | return r; |
3126 | } |
3127 | |
3128 | int debug_map_and_unlock(struct device_queue_manager *dqm) |
3129 | { |
3130 | struct device *dev = dqm->dev->adev->dev; |
3131 | int r; |
3132 | |
3133 | if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { |
		dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
		return -EINVAL;
	}

	if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
3139 | return 0; |
3140 | |
3141 | r = map_queues_cpsch(dqm); |
3142 | |
3143 | dqm_unlock(dqm); |
3144 | |
3145 | return r; |
3146 | } |
3147 | |
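/*
 * Rebuild the runlist so that MQD or debug-state changes take effect:
 * debug_lock_and_unmap() takes the dqm lock and unmaps all queues,
 * debug_map_and_unlock() maps them again and drops the lock.
 */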
3148 | int debug_refresh_runlist(struct device_queue_manager *dqm) |
3149 | { |
3150 | int r = debug_lock_and_unmap(dqm); |
3151 | |
3152 | if (r) |
3153 | return r; |
3154 | |
3155 | return debug_map_and_unlock(dqm); |
3156 | } |
3157 | |
3158 | #if defined(CONFIG_DEBUG_FS) |
3159 | |
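/*
 * Print a register dump as "offset: value value ...", starting a new row
 * whenever the register offsets stop being consecutive and printing at
 * most eight values per row.
 */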
3160 | static void seq_reg_dump(struct seq_file *m, |
3161 | uint32_t (*dump)[2], uint32_t n_regs) |
3162 | { |
3163 | uint32_t i, count; |
3164 | |
3165 | for (i = 0, count = 0; i < n_regs; i++) { |
3166 | if (count == 0 || |
3167 | dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) { |
			seq_printf(m, "%s %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
3174 | count--; |
3175 | } |
3176 | } |
3177 | |
	seq_puts(m, "\n");
3179 | } |
3180 | |
3181 | int dqm_debugfs_hqds(struct seq_file *m, void *data) |
3182 | { |
3183 | struct device_queue_manager *dqm = data; |
3184 | uint32_t xcc_mask = dqm->dev->xcc_mask; |
3185 | uint32_t (*dump)[2], n_regs; |
3186 | int pipe, queue; |
3187 | int r = 0, xcc_id; |
3188 | uint32_t sdma_engine_start; |
3189 | |
3190 | if (!dqm->sched_running) { |
		seq_puts(m, " Device is stopped\n");
3192 | return 0; |
3193 | } |
3194 | |
3195 | for_each_inst(xcc_id, xcc_mask) { |
3196 | r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, |
3197 | KFD_CIK_HIQ_PIPE, |
3198 | KFD_CIK_HIQ_QUEUE, &dump, |
3199 | &n_regs, xcc_id); |
3200 | if (!r) { |
			seq_printf(
				m,
				" Inst %d, HIQ on MEC %d Pipe %d Queue %d\n",
				xcc_id,
				KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
				KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
				KFD_CIK_HIQ_QUEUE);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
3211 | } |
3212 | |
3213 | for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { |
3214 | int pipe_offset = pipe * get_queues_per_pipe(dqm); |
3215 | |
3216 | for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { |
3217 | if (!test_bit(pipe_offset + queue, |
3218 | dqm->dev->kfd->shared_resources.cp_queue_bitmap)) |
3219 | continue; |
3220 | |
3221 | r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, |
3222 | pipe, queue, |
3223 | &dump, &n_regs, |
3224 | xcc_id); |
3225 | if (r) |
3226 | break; |
3227 | |
				seq_printf(m,
					   " Inst %d, CP Pipe %d, Queue %d\n",
					   xcc_id, pipe, queue);
				seq_reg_dump(m, dump, n_regs);

				kfree(dump);
3234 | } |
3235 | } |
3236 | } |
3237 | |
3238 | sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm); |
3239 | for (pipe = sdma_engine_start; |
3240 | pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm)); |
3241 | pipe++) { |
3242 | for (queue = 0; |
3243 | queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; |
3244 | queue++) { |
3245 | r = dqm->dev->kfd2kgd->hqd_sdma_dump( |
3246 | dqm->dev->adev, pipe, queue, &dump, &n_regs); |
3247 | if (r) |
3248 | break; |
3249 | |
			seq_printf(m, " SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
3255 | } |
3256 | } |
3257 | |
3258 | return r; |
3259 | } |
3260 | |
3261 | int dqm_debugfs_hang_hws(struct device_queue_manager *dqm) |
3262 | { |
3263 | int r = 0; |
3264 | |
3265 | dqm_lock(dqm); |
	r = pm_debugfs_hang_hws(&dqm->packet_mgr);
3267 | if (r) { |
3268 | dqm_unlock(dqm); |
3269 | return r; |
3270 | } |
3271 | dqm->active_runlist = true; |
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
				 0, USE_DEFAULT_GRACE_PERIOD);
3274 | dqm_unlock(dqm); |
3275 | |
3276 | return r; |
3277 | } |
3278 | |
3279 | #endif |
3280 | |