/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE		msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GPU GFX IP block helper functions.
 */

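/*
 * amdgpu_gfx_mec_queue_to_bit - flatten a (mec, pipe, queue) tuple into the
 * bit index used by the per-XCC MEC queue bitmaps; the helpers below invert
 * the mapping again.
 *
 * Illustrative example only (the real counts are ASIC dependent): with
 * 4 pipes per MEC and 8 queues per pipe, (mec = 1, pipe = 2, queue = 3)
 * maps to bit 1 * 4 * 8 + 2 * 8 + 3 = 51.
 */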
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
		/ adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int xcc_id, int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
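 *
 * The parameter is a comma separated list of "se.sh.cu" triples, e.g.
 * amdgpu.disable_cu=1.0.3,1.0.4 would disable CUs 3 and 4 of SE 1, SH 0
 * (the numbers here are purely illustrative).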
 */
void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
{
	unsigned int se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{
	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
		return true;

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{
	int queue = ring->queue;
	int pipe = ring->pipe;

	/* Policy: use pipe1 queue0 as high priority graphics queue if we
	 * have more than one gfx pipe.
	 */
	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
		int me = ring->me;
		int bit;

		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
		if (ring == &adev->gfx.gfx_ring[bit])
			return true;
	}

	return false;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use 1st queue as high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

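/*
 * amdgpu_gfx_compute_queue_acquire - choose which MEC queues the gfx driver
 * owns (the remainder can be used by other clients).
 *
 * With the multipipe policy, queues are interleaved across the pipes of MEC1.
 * Illustrative example only (counts are ASIC dependent): with 4 pipes per MEC
 * and 8 queues per pipe, ring index 5 lands on pipe 1 / queue 1, i.e. bitmap
 * bit 1 * 8 + 1 = 9.
 */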
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, j, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;

	if (multipipe_policy) {
		/* policy: spread queues evenly across all pipes on MEC1 only.
		 * For multiple XCCs, just reuse the same policy for simplicity. */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; i++) {
				pipe = i % adev->gfx.mec.num_pipe_per_mec;
				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
					adev->gfx.mec.num_queue_per_pipe;

				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
					adev->gfx.mec_bitmap[j].queue_bitmap);
			}
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; ++i)
				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
		}
	}

	for (j = 0; j < num_xcc; j++) {
		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
	}
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
				adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		for (i = 0; i < max_queues_per_me; i++) {
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring, int xcc_id)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. Queue id 0 must be used, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->xcc_id = xcc_id;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	ring->doorbell_index =
		(adev->doorbell_index.kiq +
		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
		<< 1;

	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned int hpd_size, int xcc_id)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned int mqd_size, int xcc_id)
{
	int r, i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;
	u32 domain = AMDGPU_GEM_DOMAIN_GTT;

#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
		domain |= AMDGPU_GEM_DOMAIN_VRAM;
#endif

	/* create MQD for KIQ */
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV
		 * the VRAM domain is a must; otherwise the hypervisor's SAVE_VF fails
		 * after the driver is unloaded, because the MQD has been deallocated
		 * and unbound from the GART. To avoid that divergence, use the VRAM
		 * domain for the KIQ MQD on both SRIOV and bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->mqd_obj,
					    &ring->mqd_gpu_addr,
					    &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
		if (!kiq->mqd_backup) {
			dev_warn(adev->dev,
				 "no memory to create MQD backup for ring %s\n", ring->name);
			return -ENOMEM;
		}
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    domain, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				ring->mqd_size = mqd_size;
				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i]) {
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
					return -ENOMEM;
				}
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    domain, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			ring->mqd_size = mqd_size;
			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[j]) {
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring = NULL;
	int i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		kfree(adev->gfx.mec.mqd_backup[j]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &kiq->ring;
	kfree(kiq->mqd_backup);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	struct amdgpu_hive_info *hive;
	struct amdgpu_ras *ras;
	int hive_ras_recovery = 0;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_compute_rings)) {
		spin_unlock(&kiq->ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_unmap_queues(kiq_ring,
					   &adev->gfx.compute_ring[j],
					   RESET_QUEUES, 0, 0);
	}

	/*
	 * Workaround: only skip the kiq_ring test during RAS recovery in the
	 * suspend stage for gfx 9.4.3.
	 */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	ras = amdgpu_ras_get_context(adev);
	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
	    ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) {
		spin_unlock(&kiq->ring_lock);
		return 0;
	}

	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}

int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
				      adev->gfx.num_gfx_rings)) {
			spin_unlock(&kiq->ring_lock);
			return -ENOMEM;
		}

		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			j = i + xcc_id * adev->gfx.num_gfx_rings;
			kiq->pmf->kiq_unmap_queues(kiq_ring,
						   &adev->gfx.gfx_ring[j],
						   PREEMPT_QUEUES, 0, 0);
		}
	}

	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}

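/*
 * Translate a bit from the driver's MEC queue bitmap into the bit position
 * used by the KIQ SET_RESOURCES queue mask. The hard-coded 4 * 8 stride
 * below is assumed to reflect the packet's fixed layout of 8 queues per
 * pipe and 4 pipes per MEC, independent of how many queues the driver
 * actually enables.
 */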
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}

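/*
 * amdgpu_gfx_enable_kcq - bring up all driver-owned compute queues via the
 * KIQ: build the SET_RESOURCES queue mask from the MEC queue bitmap, emit
 * one map_queues packet per compute ring on this XCC, then run the KIQ ring
 * test to confirm the packets were consumed.
 */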
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	uint64_t queue_mask = 0;
	int r, i, j;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating. */
		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	amdgpu_device_flush_hdp(adev, NULL);

	spin_lock(&kiq->ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
			      adev->gfx.num_compute_rings +
			      kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&kiq->ring_lock);
		return r;
	}

	if (adev->enable_mes)
		queue_mask = ~0ULL;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_map_queues(kiq_ring,
					 &adev->gfx.compute_ring[j]);
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int r, i, j;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	amdgpu_device_flush_hdp(adev, NULL);

	spin_lock(&kiq->ring_lock);
	/* No need to map kgq on the slave */
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
				      adev->gfx.num_gfx_rings);
		if (r) {
			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
			spin_unlock(&kiq->ring_lock);
			return r;
		}

		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			j = i + xcc_id * adev->gfx.num_gfx_rings;
			kiq->pmf->kiq_map_queues(kiq_ring,
						 &adev->gfx.gfx_ring[j]);
		}
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
		DRM_ERROR("KGQ enable failed\n");

	return r;
}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx ip after gfx cg pg is enabled.
 * 2. Other clients can send a request to disable the gfx off feature; such requests should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not request to enable the gfx off feature before disabling it.
 */
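
/*
 * Typical usage (illustrative sketch only, not a specific caller): code that
 * needs the GFX block to stay powered brackets its register access with a
 * disable/enable pair,
 *
 *	amdgpu_gfx_off_ctrl(adev, false);
 *	...access GC registers...
 *	amdgpu_gfx_off_ctrl(adev, true);
 *
 * gfx_off_req_count keeps GFXOFF disabled until every requester has called
 * the enable side again.
 */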

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
					      delay);
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_set_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_ras_block_late_init(adev, ras_block);
		if (r)
			return r;

		if (adev->gfx.cp_ecc_error_irq.funcs) {
			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
			if (r)
				goto late_fini;
		}
	} else {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
{
	int err = 0;
	struct amdgpu_gfx_ras *ras = NULL;

	/* If adev->gfx.ras is NULL, gfx does not support RAS,
	 * so there is nothing to do here.
	 */
	if (!adev->gfx.ras)
		return 0;

	ras = adev->gfx.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register gfx ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "gfx");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->gfx.ras_if = &ras->ras_block.ras_comm;

	/* If no special ras_late_init function is defined, use the gfx default */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;

	/* If no special ras_cb function is defined, use the default ras_cb */
	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;

	return 0;
}

int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
					  struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
		return adev->gfx.ras->poison_consumption_handler(adev, entry);

	return 0;
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
				   void *err_data,
				   struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
			       void *ras_error_status,
			       void (*func)(struct amdgpu_device *adev, void *ras_error_status,
					    int xcc_id))
{
	int i;
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	if (err_data) {
		err_data->ue_count = 0;
		err_data->ce_count = 0;
	}

	for_each_inst(i, xcc_mask)
		func(adev, ras_error_status, i);
}

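/*
 * amdgpu_kiq_rreg - read a register by going through the KIQ ring.
 *
 * The KIQ copies the register value into a writeback slot and the caller
 * polls the fence until the packet has completed; if the MES ring is ready,
 * the access is routed through MES instead.
 */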
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (adev->mes.ring.sched.ready)
		return amdgpu_mes_rreg(adev, reg);

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because doing so may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * Also don't wait in IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}

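/*
 * amdgpu_kiq_wreg - write a register through the KIQ ring; same flow as
 * amdgpu_kiq_rreg() above, minus the writeback slot.
 */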
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_wreg(adev, reg, v);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because doing so may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * Also don't wait in IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}

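/*
 * amdgpu_gfx_cp_init_microcode - record version/feature information from a
 * CP firmware image and, when PSP front-door loading is used, register the
 * image with the firmware list so its size is accounted for.
 */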
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
				  uint32_t ucode_id)
{
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	struct amdgpu_firmware_info *info = NULL;
	const struct firmware *ucode_fw;
	unsigned int fw_size;

	switch (ucode_id) {
	case AMDGPU_UCODE_ID_CP_PFP:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.ce_fw->data;
		adev->gfx.ce_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.ce_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.ce_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	default:
		break;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[ucode_id];
		info->ucode_id = ucode_id;
		info->fw = ucode_fw;
		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
	}
}

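/*
 * An XCC is considered the master of its partition when its index is the
 * first one within a group of num_xcc_per_xcp XCCs.
 */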
bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
{
	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
			   adev->gfx.num_xcc_per_xcp : 1));
}

static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
							struct device_attribute *addr,
							char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int mode;

	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
					       AMDGPU_XCP_FL_NONE);

	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
}

static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
						struct device_attribute *addr,
						const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_gfx_partition mode;
	int ret = 0, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	if (num_xcc % 2 != 0)
		return -EINVAL;

	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
		mode = AMDGPU_SPX_PARTITION_MODE;
	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
		/*
		 * DPX mode needs the number of AIDs to be a multiple of 2.
		 * Each AID connects 2 XCCs.
		 */
		if (num_xcc % 4)
			return -EINVAL;
		mode = AMDGPU_DPX_PARTITION_MODE;
	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
		if (num_xcc != 6)
			return -EINVAL;
		mode = AMDGPU_TPX_PARTITION_MODE;
	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
		if (num_xcc != 8)
			return -EINVAL;
		mode = AMDGPU_QPX_PARTITION_MODE;
	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
		mode = AMDGPU_CPX_PARTITION_MODE;
	} else {
		return -EINVAL;
	}

	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);

	if (ret)
		return ret;

	return count;
}

static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
							   struct device_attribute *addr,
							   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *supported_partition;

	/* TBD */
	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		supported_partition = "SPX, DPX, QPX, CPX";
		break;
	case 6:
		supported_partition = "SPX, TPX, CPX";
		break;
	case 4:
		supported_partition = "SPX, DPX, CPX";
		break;
	/* this seems to exist only in the emulation phase */
	case 2:
		supported_partition = "SPX, CPX";
		break;
	default:
		supported_partition = "Not supported";
		break;
	}

	return sysfs_emit(buf, "%s\n", supported_partition);
}

static DEVICE_ATTR(current_compute_partition, 0644,
		   amdgpu_gfx_get_current_compute_partition,
		   amdgpu_gfx_set_compute_partition);

static DEVICE_ATTR(available_compute_partition, 0444,
		   amdgpu_gfx_get_available_compute_partition, NULL);

int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
{
	int r;

	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
	if (r)
		return r;

	r = device_create_file(adev->dev, &dev_attr_available_compute_partition);

	return r;
}

void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
	device_remove_file(adev->dev, &dev_attr_available_compute_partition);
}