1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | * Christian König |
28 | */ |
29 | #include <linux/seq_file.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/debugfs.h> |
32 | #include <drm/drmP.h> |
33 | #include <drm/amdgpu_drm.h> |
34 | #include "amdgpu.h" |
35 | #include "atom.h" |
36 | |
37 | /* |
38 | * Rings |
39 | * Most engines on the GPU are fed via ring buffers. Ring |
 * buffers are areas of GPU-accessible memory that the host
41 | * writes commands into and the GPU reads commands out of. |
42 | * There is a rptr (read pointer) that determines where the |
43 | * GPU is currently reading, and a wptr (write pointer) |
44 | * which determines where the host has written. When the |
45 | * pointers are equal, the ring is idle. When the host |
46 | * writes commands to the ring buffer, it increments the |
47 | * wptr. The GPU then starts fetching commands and executes |
48 | * them until the pointers are equal again. |
49 | */ |
50 | static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, |
51 | struct amdgpu_ring *ring); |
52 | static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); |
53 | |
54 | /** |
55 | * amdgpu_ring_alloc - allocate space on the ring buffer |
56 | * |
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
60 | * |
61 | * Allocate @ndw dwords in the ring buffer (all asics). |
62 | * Returns 0 on success, error on failure. |
63 | */ |
64 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) |
65 | { |
66 | /* Align requested size with padding so unlock_commit can |
67 | * pad safely */ |
68 | ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; |
69 | |
70 | /* Make sure we aren't trying to allocate more space |
71 | * than the maximum for one submission |
72 | */ |
73 | if (WARN_ON_ONCE(ndw > ring->max_dw)) |
74 | return -ENOMEM; |
75 | |
76 | ring->count_dw = ndw; |
77 | ring->wptr_old = ring->wptr; |
78 | |
79 | if (ring->funcs->begin_use) |
80 | ring->funcs->begin_use(ring); |
81 | |
82 | return 0; |
83 | } |
84 | |
/**
 * amdgpu_ring_insert_nop - insert NOP packets
86 | * |
87 | * @ring: amdgpu_ring structure holding ring information |
88 | * @count: the number of NOP packets to insert |
89 | * |
90 | * This is the generic insert_nop function for rings except SDMA |
91 | */ |
92 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
93 | { |
94 | int i; |
95 | |
96 | for (i = 0; i < count; i++) |
97 | amdgpu_ring_write(ring, ring->funcs->nop); |
98 | } |
99 | |
/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
101 | * |
102 | * @ring: amdgpu_ring structure holding ring information |
103 | * @ib: IB to add NOP packets to |
104 | * |
105 | * This is the generic pad_ib function for rings except SDMA |
106 | */ |
107 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) |
108 | { |
109 | while (ib->length_dw & ring->funcs->align_mask) |
110 | ib->ptr[ib->length_dw++] = ring->funcs->nop; |
111 | } |
112 | |
113 | /** |
 * amdgpu_ring_commit - tell the GPU to execute the new commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
119 | * |
120 | * Update the wptr (write pointer) to tell the GPU to |
121 | * execute new commands on the ring buffer (all asics). |
122 | */ |
123 | void amdgpu_ring_commit(struct amdgpu_ring *ring) |
124 | { |
125 | uint32_t count; |
126 | |
127 | /* We pad to match fetch size */ |
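	/* e.g. with align_mask 0xf and (wptr & 0xf) == 5 this emits
	 * (16 - 5) % 16 = 11 NOPs; if wptr is already aligned the
	 * modulo makes count 0 and nothing is inserted.
	 */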
128 | count = ring->funcs->align_mask + 1 - |
129 | (ring->wptr & ring->funcs->align_mask); |
130 | count %= ring->funcs->align_mask + 1; |
131 | ring->funcs->insert_nop(ring, count); |
132 | |
133 | mb(); |
134 | amdgpu_ring_set_wptr(ring); |
135 | |
136 | if (ring->funcs->end_use) |
137 | ring->funcs->end_use(ring); |
138 | } |
139 | |
140 | /** |
141 | * amdgpu_ring_undo - reset the wptr |
142 | * |
143 | * @ring: amdgpu_ring structure holding ring information |
144 | * |
145 | * Reset the driver's copy of the wptr (all asics). |
146 | */ |
147 | void amdgpu_ring_undo(struct amdgpu_ring *ring) |
148 | { |
149 | ring->wptr = ring->wptr_old; |
150 | |
151 | if (ring->funcs->end_use) |
152 | ring->funcs->end_use(ring); |
153 | } |
154 | |
155 | /** |
156 | * amdgpu_ring_priority_put - restore a ring's priority |
157 | * |
158 | * @ring: amdgpu_ring structure holding the information |
159 | * @priority: target priority |
160 | * |
161 | * Release a request for executing at @priority |
162 | */ |
163 | void amdgpu_ring_priority_put(struct amdgpu_ring *ring, |
164 | enum drm_sched_priority priority) |
165 | { |
166 | int i; |
167 | |
168 | if (!ring->funcs->set_priority) |
169 | return; |
170 | |
171 | if (atomic_dec_return(&ring->num_jobs[priority]) > 0) |
172 | return; |
173 | |
174 | /* no need to restore if the job is already at the lowest priority */ |
175 | if (priority == DRM_SCHED_PRIORITY_NORMAL) |
176 | return; |
177 | |
178 | mutex_lock(&ring->priority_mutex); |
179 | /* something higher prio is executing, no need to decay */ |
180 | if (ring->priority > priority) |
181 | goto out_unlock; |
182 | |
183 | /* decay priority to the next level with a job available */ |
184 | for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) { |
185 | if (i == DRM_SCHED_PRIORITY_NORMAL |
186 | || atomic_read(&ring->num_jobs[i])) { |
187 | ring->priority = i; |
188 | ring->funcs->set_priority(ring, i); |
189 | break; |
190 | } |
191 | } |
192 | |
193 | out_unlock: |
194 | mutex_unlock(&ring->priority_mutex); |
195 | } |
196 | |
197 | /** |
198 | * amdgpu_ring_priority_get - change the ring's priority |
199 | * |
200 | * @ring: amdgpu_ring structure holding the information |
201 | * @priority: target priority |
202 | * |
203 | * Request a ring's priority to be raised to @priority (refcounted). |
204 | */ |
205 | void amdgpu_ring_priority_get(struct amdgpu_ring *ring, |
206 | enum drm_sched_priority priority) |
207 | { |
208 | if (!ring->funcs->set_priority) |
209 | return; |
210 | |
211 | if (atomic_inc_return(&ring->num_jobs[priority]) <= 0) |
212 | return; |
213 | |
214 | mutex_lock(&ring->priority_mutex); |
215 | if (priority <= ring->priority) |
216 | goto out_unlock; |
217 | |
218 | ring->priority = priority; |
219 | ring->funcs->set_priority(ring, priority); |
220 | |
221 | out_unlock: |
222 | mutex_unlock(&ring->priority_mutex); |
223 | } |
224 | |
225 | /** |
226 | * amdgpu_ring_init - init driver ring struct. |
227 | * |
228 | * @adev: amdgpu_device pointer |
229 | * @ring: amdgpu_ring structure holding ring information |
 * @max_dw: maximum number of dwords that can be allocated for a single submission
 * @irq_src: interrupt source to use for fences on this ring
 * @irq_type: interrupt type to use for fences on this ring
232 | * |
233 | * Initialize the driver information for the selected ring (all asics). |
234 | * Returns 0 on success, error on failure. |
235 | */ |
236 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, |
237 | unsigned max_dw, struct amdgpu_irq_src *irq_src, |
238 | unsigned irq_type) |
239 | { |
240 | int r, i; |
241 | int sched_hw_submission = amdgpu_sched_hw_submission; |
242 | |
243 | /* Set the hw submission limit higher for KIQ because |
244 | * it's used for a number of gfx/compute tasks by both |
245 | * KFD and KGD which may have outstanding fences and |
246 | * it doesn't really use the gpu scheduler anyway; |
247 | * KIQ tasks get submitted directly to the ring. |
248 | */ |
249 | if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) |
250 | sched_hw_submission = max(sched_hw_submission, 256); |
251 | |
252 | if (ring->adev == NULL) { |
253 | if (adev->num_rings >= AMDGPU_MAX_RINGS) |
254 | return -EINVAL; |
255 | |
256 | ring->adev = adev; |
257 | ring->idx = adev->num_rings++; |
258 | adev->rings[ring->idx] = ring; |
259 | r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission); |
260 | if (r) |
261 | return r; |
262 | } |
263 | |
264 | r = amdgpu_device_wb_get(adev, &ring->rptr_offs); |
265 | if (r) { |
266 | dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n" , r); |
267 | return r; |
268 | } |
269 | |
270 | r = amdgpu_device_wb_get(adev, &ring->wptr_offs); |
271 | if (r) { |
272 | dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n" , r); |
273 | return r; |
274 | } |
275 | |
276 | r = amdgpu_device_wb_get(adev, &ring->fence_offs); |
277 | if (r) { |
278 | dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n" , r); |
279 | return r; |
280 | } |
281 | |
282 | r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs); |
283 | if (r) { |
284 | dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n" , r); |
285 | return r; |
286 | } |
287 | ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4); |
288 | ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs]; |
289 | /* always set cond_exec_polling to CONTINUE */ |
290 | *ring->cond_exe_cpu_addr = 1; |
291 | |
292 | r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); |
293 | if (r) { |
294 | dev_err(adev->dev, "failed initializing fences (%d).\n" , r); |
295 | return r; |
296 | } |
297 | |
298 | ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); |
299 | |
300 | ring->buf_mask = (ring->ring_size / 4) - 1; |
301 | ring->ptr_mask = ring->funcs->support_64bit_ptrs ? |
302 | 0xffffffffffffffff : ring->buf_mask; |
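	/* e.g. max_dw = 1024 and sched_hw_submission = 2 gives an 8 KiB
	 * ring (2048 dwords), so buf_mask = 0x7ff; ptr_mask only differs
	 * on engines that keep 64-bit ring pointers.
	 */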
303 | /* Allocate ring buffer */ |
304 | if (ring->ring_obj == NULL) { |
305 | r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE, |
306 | AMDGPU_GEM_DOMAIN_GTT, |
307 | &ring->ring_obj, |
308 | &ring->gpu_addr, |
309 | (void **)&ring->ring); |
310 | if (r) { |
311 | dev_err(adev->dev, "(%d) ring create failed\n" , r); |
312 | return r; |
313 | } |
314 | amdgpu_ring_clear_ring(ring); |
315 | } |
316 | |
317 | ring->max_dw = max_dw; |
318 | ring->priority = DRM_SCHED_PRIORITY_NORMAL; |
319 | mutex_init(&ring->priority_mutex); |
320 | |
321 | for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) |
322 | atomic_set(&ring->num_jobs[i], 0); |
323 | |
324 | if (amdgpu_debugfs_ring_init(adev, ring)) { |
325 | DRM_ERROR("Failed to register debugfs file for rings !\n" ); |
326 | } |
327 | |
328 | return 0; |
329 | } |
330 | |
331 | /** |
332 | * amdgpu_ring_fini - tear down the driver ring struct. |
333 | * |
 * @ring: amdgpu_ring structure holding ring information
336 | * |
337 | * Tear down the driver information for the selected ring (all asics). |
338 | */ |
339 | void amdgpu_ring_fini(struct amdgpu_ring *ring) |
340 | { |
341 | ring->sched.ready = false; |
342 | |
	/* Don't tear down a ring that was never initialized */
344 | if (!(ring->adev) || !(ring->adev->rings[ring->idx])) |
345 | return; |
346 | |
347 | amdgpu_device_wb_free(ring->adev, ring->rptr_offs); |
348 | amdgpu_device_wb_free(ring->adev, ring->wptr_offs); |
349 | |
350 | amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs); |
351 | amdgpu_device_wb_free(ring->adev, ring->fence_offs); |
352 | |
353 | amdgpu_bo_free_kernel(&ring->ring_obj, |
354 | &ring->gpu_addr, |
355 | (void **)&ring->ring); |
356 | |
357 | amdgpu_debugfs_ring_fini(ring); |
358 | |
359 | dma_fence_put(ring->vmid_wait); |
360 | ring->vmid_wait = NULL; |
361 | ring->me = 0; |
362 | |
363 | ring->adev->rings[ring->idx] = NULL; |
364 | } |
365 | |
366 | /** |
367 | * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper |
368 | * |
 * @ring: amdgpu_ring structure holding ring information
370 | * @reg0: register to write |
371 | * @reg1: register to wait on |
372 | * @ref: reference value to write/wait on |
373 | * @mask: mask to wait on |
374 | * |
375 | * Helper for rings that don't support write and wait in a |
376 | * single oneshot packet. |
377 | */ |
378 | void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, |
379 | uint32_t reg0, uint32_t reg1, |
380 | uint32_t ref, uint32_t mask) |
381 | { |
382 | amdgpu_ring_emit_wreg(ring, reg0, ref); |
383 | amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); |
384 | } |
385 | |
386 | /** |
387 | * amdgpu_ring_soft_recovery - try to soft recover a ring lockup |
388 | * |
389 | * @ring: ring to try the recovery on |
390 | * @vmid: VMID we try to get going again |
 * @fence: timed out fence
392 | * |
393 | * Tries to get a ring proceeding again when it is stuck. |
394 | */ |
395 | bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, |
396 | struct dma_fence *fence) |
397 | { |
398 | ktime_t deadline = ktime_add_us(ktime_get(), 10000); |
399 | |
400 | if (!ring->funcs->soft_recovery || !fence) |
401 | return false; |
402 | |
403 | atomic_inc(&ring->adev->gpu_reset_counter); |
404 | while (!dma_fence_is_signaled(fence) && |
405 | ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) |
406 | ring->funcs->soft_recovery(ring, vmid); |
407 | |
408 | return dma_fence_is_signaled(fence); |
409 | } |
410 | |
411 | /* |
412 | * Debugfs info |
413 | */ |
414 | #if defined(CONFIG_DEBUG_FS) |
415 | |
416 | /* Layout of file is 12 bytes consisting of |
417 | * - rptr |
418 | * - wptr |
419 | * - driver's copy of wptr |
420 | * |
 * followed by n dwords of ring data
422 | */ |
423 | static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, |
424 | size_t size, loff_t *pos) |
425 | { |
426 | struct amdgpu_ring *ring = file_inode(f)->i_private; |
427 | int r, i; |
428 | uint32_t value, result, early[3]; |
429 | |
430 | if (*pos & 3 || size & 3) |
431 | return -EINVAL; |
432 | |
433 | result = 0; |
434 | |
435 | if (*pos < 12) { |
436 | early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask; |
437 | early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask; |
438 | early[2] = ring->wptr & ring->buf_mask; |
439 | for (i = *pos / 4; i < 3 && size; i++) { |
440 | r = put_user(early[i], (uint32_t *)buf); |
441 | if (r) |
442 | return r; |
443 | buf += 4; |
444 | result += 4; |
445 | size -= 4; |
446 | *pos += 4; |
447 | } |
448 | } |
449 | |
450 | while (size) { |
451 | if (*pos >= (ring->ring_size + 12)) |
452 | return result; |
453 | |
454 | value = ring->ring[(*pos - 12)/4]; |
455 | r = put_user(value, (uint32_t*)buf); |
456 | if (r) |
457 | return r; |
458 | buf += 4; |
459 | result += 4; |
460 | size -= 4; |
461 | *pos += 4; |
462 | } |
463 | |
464 | return result; |
465 | } |
466 | |
467 | static const struct file_operations amdgpu_debugfs_ring_fops = { |
468 | .owner = THIS_MODULE, |
469 | .read = amdgpu_debugfs_ring_read, |
470 | .llseek = default_llseek |
471 | }; |
472 | |
473 | #endif |
474 | |
475 | static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, |
476 | struct amdgpu_ring *ring) |
477 | { |
478 | #if defined(CONFIG_DEBUG_FS) |
479 | struct drm_minor *minor = adev->ddev->primary; |
480 | struct dentry *ent, *root = minor->debugfs_root; |
481 | char name[32]; |
482 | |
483 | sprintf(name, "amdgpu_ring_%s" , ring->name); |
484 | |
485 | ent = debugfs_create_file(name, |
486 | S_IFREG | S_IRUGO, root, |
487 | ring, &amdgpu_debugfs_ring_fops); |
488 | if (!ent) |
489 | return -ENOMEM; |
490 | |
491 | i_size_write(ent->d_inode, ring->ring_size + 12); |
492 | ring->ent = ent; |
493 | #endif |
494 | return 0; |
495 | } |
496 | |
497 | static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) |
498 | { |
499 | #if defined(CONFIG_DEBUG_FS) |
500 | debugfs_remove(ring->ent); |
501 | #endif |
502 | } |
503 | |
504 | /** |
 * amdgpu_ring_test_helper - test the ring and set scheduler readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler readiness status accordingly.
510 | * |
511 | * Returns 0 on success, error on failure. |
512 | */ |
513 | int amdgpu_ring_test_helper(struct amdgpu_ring *ring) |
514 | { |
515 | struct amdgpu_device *adev = ring->adev; |
516 | int r; |
517 | |
518 | r = amdgpu_ring_test_ring(ring); |
519 | if (r) |
520 | DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n" , |
521 | ring->name, r); |
522 | else |
523 | DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n" , |
524 | ring->name); |
525 | |
526 | ring->sched.ready = !r; |
527 | return r; |
528 | } |
529 | |