/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */

#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or a memory location depends on the asic
 * and on whether writeback is enabled.
 */

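/*
 * Illustrative sketch of how the helpers below fit together (not taken
 * verbatim from a caller; error handling is trimmed and the GFX ring is
 * just an example -- emission must happen under the ring lock, see the
 * comment in radeon_fence_emit()):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (!r) {
 *		r = radeon_fence_wait(fence, false);
 *		radeon_fence_unref(&fence);
 *	}
 */
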
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr)
			*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr)
			seq = le32_to_cpu(*drv->cpu_addr);
		else
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL)
		return -ENOMEM;

	(*fence)->rdev = rdev;
	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	(*fence)->is_vm_update = false;
	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
		       &rdev->fence_queue.lock,
		       rdev->fence_context + ring,
		       seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}

/*
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * this function is called with fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
				       unsigned int mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		dma_fence_signal_locked(&fence->base);
		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		dma_fence_put(&fence->base);
	}
	return 0;
}

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned int count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop there would need
	 * to be continuously new fences signaled, i.e. radeon_fence_read
	 * needs to return a different value each time for both the currently
	 * polling process and the other process that xchgs last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last_seq must be higher than
	 * the seq value we just read. Which means the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even safer, we count the number of times we loop and
	 * bail out after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}
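		/*
		 * Worked example of the 32->64 bit extension above, with
		 * made-up numbers: if last_seq == 0x00000001fffffff0 and
		 * the hardware returns 0x00000005, the first OR yields
		 * 0x0000000100000005, which is below last_seq, so the
		 * 32-bit counter must have wrapped and the upper 32 bits
		 * are taken from last_emitted instead, giving e.g.
		 * 0x0000000200000005.
		 */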

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * understanding that we might have set an older
			 * fence seq than the real last seq signaled by
			 * the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see if a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->irq.installed) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
		/* good news, we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id 0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned int ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	return false;
}

static bool radeon_fence_is_signaled(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned int ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
			return true;
	}
	return false;
}

/**
 * radeon_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, lets not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	dma_fence_get(f);
	return true;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		dma_fence_signal(&fence->base);
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			|| rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			|| rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}

/**
 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 */
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	/*
	 * This function should not be called on !radeon fences.
	 * If this is the case, it would mean this function can
	 * also be called on radeon fences belonging to another card.
	 * exclusive_lock is not held in that case.
	 */
	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
		return dma_fence_wait(&fence->base, intr);

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
	if (r <= 0)
		return r;

	dma_fence_signal(&fence->base);
	return r;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	if (r > 0)
		return 0;
	else
		return r;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned int i, num_rings = 0;
	long r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i])
			continue;

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	return 0;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence
		 */
		return -ENOENT;
	}

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	dma_fence_get(&fence->base);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		dma_fence_put(&tmp->base);
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wraparound */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned int)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring])
		return false;

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned int i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
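
/*
 * Sketch of how the two helpers above are typically paired by a caller
 * that makes @dst_ring wait on @fence from another ring (illustrative
 * only; the actual semaphore emission lives in the semaphore code and
 * is omitted here):
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */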

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx\n",
		 ring, rdev->fence_drv[ring].gpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 */
void radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++)
		radeon_fence_driver_init_ring(rdev, ring);

	radeon_debugfs_fence_init(rdev);
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info_show(struct seq_file *m, void *data)
{
	struct radeon_device *rdev = m->private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/*
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(void *data, u64 *val)
{
	struct radeon_device *rdev = (struct radeon_device *)data;

	down_read(&rdev->exclusive_lock);
	*val = rdev->needs_reset;
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(radeon_debugfs_gpu_reset_fops,
			 radeon_debugfs_gpu_reset, NULL, "%lld\n");
#endif

void radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gpu_reset", 0444, root, rdev,
			    &radeon_debugfs_gpu_reset_fops);
	debugfs_create_file("radeon_fence_info", 0444, root, rdev,
			    &radeon_debugfs_fence_info_fops);
#endif
}

static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);

	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default:
		WARN_ON_ONCE(1);
		return "radeon.unk";
	}
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct radeon_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct radeon_wait_cb *wait =
		container_of(cb, struct radeon_wait_cb, base);

	wake_up_process(wait->task);
}

static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	struct radeon_wait_cb cb;

	cb.task = current;

	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * radeon_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (radeon_test_signaled(fence))
			break;

		if (rdev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);

	return t;
}

const struct dma_fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};