/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fence mechanism for dma-buf to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#ifndef __LINUX_DMA_FENCE_H
#define __LINUX_DMA_FENCE_H

#include <linux/err.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;

/**
 * struct dma_fence - software synchronization primitive
 * @refcount: refcount for this fence
 * @ops: dma_fence_ops associated with this fence
 * @rcu: used for releasing fence with kfree_rcu
 * @cb_list: list of all callbacks to call
 * @lock: spin_lock_irqsave used for locking
 * @context: execution context this fence belongs to, returned by
 *           dma_fence_context_alloc()
 * @seqno: the sequence number of this fence inside the execution context,
 * can be compared to decide which fence would be signaled later.
 * @flags: A mask of DMA_FENCE_FLAG_* defined below
 * @timestamp: Timestamp when the fence was signaled.
 * @error: Optional, only valid if < 0, must be set before calling
 * dma_fence_signal, indicates that the fence has completed with an error.
 *
 * The flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most
 * of the time.
 *
 * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
 * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
 * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * Since atomic bitops are used, a set DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT does
 * not guarantee that enable_signaling has actually been called. In
 * particular, if dma_fence_signal was called right before this bit was set,
 * the signaling path may already have set DMA_FENCE_FLAG_SIGNALED_BIT before
 * enable_signaling was called. Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT
 * after setting DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes
 * sure that after dma_fence_signal was called, any enable_signaling call will
 * either have been completed, or never called at all.
 */
struct dma_fence {
	spinlock_t *lock;
	const struct dma_fence_ops *ops;
	/*
	 * We clear the callback list on kref_put so that by the time we
	 * release the fence it is unused. No one should be adding to the
	 * cb_list that they don't themselves hold a reference for.
	 *
	 * The lifetime of the timestamp is similarly tied to both the
	 * rcu freelist and the cb_list. The timestamp is only set upon
	 * signaling while simultaneously notifying the cb_list. Ergo, we
	 * only use either the cb_list or the timestamp. Upon destruction,
	 * neither is accessible, and so we can use the rcu. This means
	 * that the cb_list is *only* valid until the signal bit is set,
	 * and to read either you *must* hold a reference to the fence,
	 * and not just the rcu_read_lock.
	 *
	 * Listed in chronological order.
	 */
	union {
		struct list_head cb_list;
		/* @cb_list replaced by @timestamp on dma_fence_signal() */
		ktime_t timestamp;
		/* @timestamp replaced by @rcu on dma_fence_release() */
		struct rcu_head rcu;
	};
	u64 context;
	u64 seqno;
	unsigned long flags;
	struct kref refcount;
	int error;
};

enum dma_fence_flag_bits {
	DMA_FENCE_FLAG_SIGNALED_BIT,
	DMA_FENCE_FLAG_TIMESTAMP_BIT,
	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};
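
/*
 * Example (illustrative sketch, not part of this API): a fence implementer
 * can layer private state on top of DMA_FENCE_FLAG_USER_BITS and manipulate
 * it with atomic bitops, without taking &dma_fence.lock. The names
 * FOO_FENCE_FLAG_PREEMPTED_BIT and foo_fence_mark_preempted() are
 * hypothetical.
 *
 *	#define FOO_FENCE_FLAG_PREEMPTED_BIT	(DMA_FENCE_FLAG_USER_BITS + 0)
 *
 *	static void foo_fence_mark_preempted(struct dma_fence *fence)
 *	{
 *		set_bit(FOO_FENCE_FLAG_PREEMPTED_BIT, &fence->flags);
 *	}
 */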

typedef void (*dma_fence_func_t)(struct dma_fence *fence,
				 struct dma_fence_cb *cb);

/**
 * struct dma_fence_cb - callback for dma_fence_add_callback()
 * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
 * @func: dma_fence_func_t to call
 *
 * This struct will be initialized by dma_fence_add_callback(), additional
 * data can be passed along by embedding dma_fence_cb in another struct.
 */
struct dma_fence_cb {
	struct list_head node;
	dma_fence_func_t func;
};
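
/*
 * Example (illustrative sketch): additional data is passed to a callback by
 * embedding dma_fence_cb in a private struct and recovering it with
 * container_of(). "struct foo_waiter" and foo_fence_cb() are hypothetical;
 * the completion assumes <linux/completion.h>.
 *
 *	struct foo_waiter {
 *		struct dma_fence_cb cb;
 *		struct completion done;
 *	};
 *
 *	static void foo_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 *	{
 *		struct foo_waiter *waiter = container_of(cb, struct foo_waiter, cb);
 *
 *		complete(&waiter->done);
 *	}
 */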

/**
 * struct dma_fence_ops - operations implemented for fence
 *
 */
struct dma_fence_ops {
	/**
	 * @use_64bit_seqno:
	 *
	 * True if this dma_fence implementation uses 64bit seqno, false
	 * otherwise.
	 */
	bool use_64bit_seqno;

	/**
	 * @get_driver_name:
	 *
	 * Returns the driver name. This is a callback to allow drivers to
	 * compute the name at runtime, without having to store it permanently
	 * for each fence, or build a cache of some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_driver_name)(struct dma_fence *fence);

	/**
	 * @get_timeline_name:
	 *
	 * Return the name of the context this fence belongs to. This is a
	 * callback to allow drivers to compute the name at runtime, without
	 * having to store it permanently for each fence, or build a cache of
	 * some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_timeline_name)(struct dma_fence *fence);

	/**
	 * @enable_signaling:
	 *
	 * Enable software signaling of fence.
	 *
	 * For fence implementations that have the capability for hw->hw
	 * signaling, they can implement this op to enable the necessary
	 * interrupts, or insert commands into cmdstream, etc, to avoid these
	 * costly operations for the common case where only hw->hw
	 * synchronization is required. This is called in the first
	 * dma_fence_wait() or dma_fence_add_callback() path to let the fence
	 * implementation know that there is another driver waiting on the
	 * signal (ie. hw->sw case).
	 *
	 * This function can be called from atomic context, but not
	 * from irq context, so normal spinlocks can be used.
	 *
	 * A return value of false indicates the fence already passed,
	 * or some failure occurred that made it impossible to enable
	 * signaling. True indicates successful enabling.
	 *
	 * &dma_fence.error may be set in enable_signaling, but only when false
	 * is returned.
	 *
	 * Since many implementations can call dma_fence_signal() even before
	 * @enable_signaling has been called there's a race window, where the
	 * dma_fence_signal() might result in the final fence reference being
	 * released and its memory freed. To avoid this, implementations of this
	 * callback should grab their own reference using dma_fence_get(), to be
	 * released when the fence is signalled (through e.g. the interrupt
	 * handler).
	 *
	 * This callback is optional. If this callback is not present, then the
	 * driver must always have signaling enabled.
	 */
	bool (*enable_signaling)(struct dma_fence *fence);

	/**
	 * @signaled:
	 *
	 * Peek whether the fence is signaled, as a fastpath optimization for
	 * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
	 * callback does not need to make any guarantees beyond that a fence
	 * which was once reported as signaled must always be reported as
	 * signaled from then on. This callback may return false even if the
	 * fence has completed already; in this case information hasn't
	 * propagated through the system yet. See also dma_fence_is_signaled().
	 *
	 * May set &dma_fence.error if returning true.
	 *
	 * This callback is optional.
	 */
	bool (*signaled)(struct dma_fence *fence);

	/**
	 * @wait:
	 *
	 * Custom wait implementation, defaults to dma_fence_default_wait() if
	 * not set.
	 *
	 * Deprecated and should not be used by new implementations. Only used
	 * by existing implementations which need special handling for their
	 * hardware reset procedure.
	 *
	 * Must return -ERESTARTSYS if intr is true and the wait was
	 * interrupted, the remaining jiffies if the fence has signaled, or 0 if
	 * the wait timed out. Can also return other error values on custom
	 * implementations, which should be treated as if the fence is signaled.
	 * For example a hardware lockup could be reported like that.
	 */
	signed long (*wait)(struct dma_fence *fence,
			    bool intr, signed long timeout);

	/**
	 * @release:
	 *
	 * Called on destruction of fence to release additional resources.
	 * Can be called from irq context. This callback is optional. If it is
	 * NULL, then dma_fence_free() is instead called as the default
	 * implementation.
	 */
	void (*release)(struct dma_fence *fence);

	/**
	 * @fence_value_str:
	 *
	 * Callback to fill in free-form debug info specific to this fence, like
	 * the sequence number.
	 *
	 * This callback is optional.
	 */
	void (*fence_value_str)(struct dma_fence *fence, char *str, int size);

	/**
	 * @timeline_value_str:
	 *
	 * Fills in the current value of the timeline as a string, like the
	 * sequence number. Note that the specific fence passed to this function
	 * should not matter, drivers should only use it to look up the
	 * corresponding timeline structures.
	 */
	void (*timeline_value_str)(struct dma_fence *fence,
				   char *str, int size);

	/**
	 * @set_deadline:
	 *
	 * Callback to allow a fence waiter to inform the fence signaler of
	 * an upcoming deadline, such as vblank, by which point the waiter
	 * would prefer the fence to be signaled. This is intended to
	 * give feedback to the fence signaler to aid in power management
	 * decisions, such as boosting GPU frequency.
	 *
	 * This is called without &dma_fence.lock held, it can be called
	 * multiple times and from any context. Locking is up to the callee
	 * if it has some state to manage. If multiple deadlines are set,
	 * the expectation is to track the soonest one. If the deadline is
	 * before the current time, it should be interpreted as an immediate
	 * deadline.
	 *
	 * This callback is optional.
	 */
	void (*set_deadline)(struct dma_fence *fence, ktime_t deadline);
};
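
/*
 * Example (illustrative sketch): a minimal dma_fence_ops. Only
 * @get_driver_name and @get_timeline_name are mandatory; without
 * @enable_signaling the driver must always have signaling enabled. All
 * "foo" names are hypothetical.
 *
 *	static const char *foo_get_driver_name(struct dma_fence *fence)
 *	{
 *		return "foo";
 *	}
 *
 *	static const char *foo_get_timeline_name(struct dma_fence *fence)
 *	{
 *		return "foo-ring0";
 *	}
 *
 *	static const struct dma_fence_ops foo_fence_ops = {
 *		.get_driver_name = foo_get_driver_name,
 *		.get_timeline_name = foo_get_timeline_name,
 *	};
 */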

void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
		    spinlock_t *lock, u64 context, u64 seqno);

void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
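
/*
 * Example (illustrative sketch): embedding and initialising a fence. The
 * driver allocates an execution context once with dma_fence_context_alloc(),
 * picks a monotonically increasing seqno per context, and hands
 * dma_fence_init() a spinlock it owns. "struct foo_fence", foo_fence_ops and
 * foo_fence_start() are hypothetical.
 *
 *	struct foo_fence {
 *		struct dma_fence base;
 *	};
 *
 *	static void foo_fence_start(struct foo_fence *f, spinlock_t *lock,
 *				    u64 ctx, u64 seqno)
 *	{
 *		dma_fence_init(&f->base, &foo_fence_ops, lock, ctx, seqno);
 *	}
 */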

/**
 * dma_fence_put - decreases refcount of the fence
 * @fence: fence to reduce refcount of
 */
static inline void dma_fence_put(struct dma_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, dma_fence_release);
}

/**
 * dma_fence_get - increases refcount of the fence
 * @fence: fence to increase refcount of
 *
 * Returns the same fence, with refcount increased by 1.
 */
static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/**
 * dma_fence_get_rcu - get a fence from a dma_resv_list with
 * rcu read lock
 * @fence: fence to increase refcount of
 *
 * Function returns NULL if no refcount could be obtained, or the fence.
 */
static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
{
	if (kref_get_unless_zero(&fence->refcount))
		return fence;
	else
		return NULL;
}

/**
 * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
 * @fencep: pointer to fence to increase refcount of
 *
 * Function returns NULL if no refcount could be obtained, or the fence.
 * This function handles acquiring a reference to a fence that may be
 * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
 * so long as the caller is using RCU on the pointer to the fence.
 *
 * An alternative mechanism is to employ a seqlock to protect a bunch of
 * fences, such as used by struct dma_resv. When using a seqlock,
 * the seqlock must be taken before and checked after a reference to the
 * fence is acquired (as shown here).
 *
 * The caller is required to hold the RCU read lock.
 */
static inline struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
	do {
		struct dma_fence *fence;

		fence = rcu_dereference(*fencep);
		if (!fence)
			return NULL;

		if (!dma_fence_get_rcu(fence))
			continue;

		/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
		 * provides a full memory barrier upon success (such as now).
		 * This is paired with the write barrier from assigning
		 * to the __rcu protected fence pointer so that if that
		 * pointer still matches the current fence, we know we
		 * have successfully acquired a reference to it. If it no
		 * longer matches, we are holding a reference to some other
		 * reallocated pointer. This is possible if the allocator
		 * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
		 * fence remains valid for the RCU grace period, but it
		 * may be reallocated. When using such allocators, we are
		 * responsible for ensuring the reference we get is to
		 * the right fence, as below.
		 */
		if (fence == rcu_access_pointer(*fencep))
			return rcu_pointer_handoff(fence);

		dma_fence_put(fence);
	} while (1);
}
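
/*
 * Example (illustrative sketch): looking up an RCU-protected fence pointer.
 * Only the lookup needs rcu_read_lock(); once dma_fence_get_rcu_safe()
 * returns non-NULL a full reference is held and the fence can be used
 * outside the read-side critical section. "foo->fence" is a hypothetical
 * __rcu pointer.
 *
 *	struct dma_fence *fence;
 *
 *	rcu_read_lock();
 *	fence = dma_fence_get_rcu_safe(&foo->fence);
 *	rcu_read_unlock();
 *
 *	if (fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */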

#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);
void __dma_fence_might_wait(void);
#else
static inline bool dma_fence_begin_signalling(void)
{
	return true;
}
static inline void dma_fence_end_signalling(bool cookie) {}
static inline void __dma_fence_might_wait(void) {}
#endif

int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
				   bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
			   struct dma_fence_cb *cb,
			   dma_fence_func_t func);
bool dma_fence_remove_callback(struct dma_fence *fence,
			       struct dma_fence_cb *cb);
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
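
/*
 * Example (illustrative sketch): registering a callback.
 * dma_fence_add_callback() returns -ENOENT if the fence was already
 * signaled; in that case the callback is never queued and the caller has to
 * handle completion itself. This builds on the hypothetical struct
 * foo_waiter / foo_fence_cb() from the dma_fence_cb example above.
 *
 *	struct foo_waiter waiter;
 *	int ret;
 *
 *	init_completion(&waiter.done);
 *	ret = dma_fence_add_callback(fence, &waiter.cb, foo_fence_cb);
 *	if (ret == -ENOENT)
 *		complete(&waiter.done);
 *	else if (ret)
 *		return ret;
 */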

/**
 * dma_fence_is_signaled_locked - Return an indication if the fence
 * is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * This function requires &dma_fence.lock to be held.
 *
 * See also dma_fence_is_signaled().
 */
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal_locked(fence);
		return true;
	}

	return false;
}

/**
 * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * It's recommended for seqno fences to call dma_fence_signal when the
 * operation is complete, as this makes it possible to prevent issues from
 * wraparound between time of issue and time of use by checking the return
 * value of this function before calling hardware-specific wait instructions.
 *
 * See also dma_fence_is_signaled_locked().
 */
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);
		return true;
	}

	return false;
}

/**
 * __dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence's seqno
 * @f2: the second fence's seqno from the same context
 * @ops: dma_fence_ops associated with the seqno
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not common across contexts.
 */
static inline bool __dma_fence_is_later(u64 f1, u64 f2,
					const struct dma_fence_ops *ops)
{
	/* This is for backward compatibility with drivers which can only handle
	 * 32bit sequence numbers. Use a 64bit compare when the driver says to
	 * do so.
	 */
	if (ops->use_64bit_seqno)
		return f1 > f2;

	return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
}
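
/*
 * Illustration: the 32bit comparison above stays correct across seqno
 * wraparound because it relies on the signed difference. For example, with
 * use_64bit_seqno == false:
 *
 *	__dma_fence_is_later(0x00000002, 0xfffffffe, ops)
 *		== (int)(0x00000002 - 0xfffffffe) > 0
 *		== (int)0x00000004 > 0
 *		== true
 *
 * so a seqno just past the wrap point is still considered later than one
 * just before it, as long as the two differ by less than 2^31.
 */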

/**
 * dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not re-used across contexts.
 */
static inline bool dma_fence_is_later(struct dma_fence *f1,
				      struct dma_fence *f2)
{
	if (WARN_ON(f1->context != f2->context))
		return false;

	return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
}

/**
 * dma_fence_later - return the chronologically later fence
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns NULL if both fences are signaled, otherwise the fence that would be
 * signaled last. Both fences must be from the same context, since a seqno is
 * not re-used across contexts.
 */
static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
						struct dma_fence *f2)
{
	if (WARN_ON(f1->context != f2->context))
		return NULL;

	/*
	 * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
	 * have been set if enable_signaling wasn't called, and enabling that
	 * here is overkill.
	 */
	if (dma_fence_is_later(f1, f2))
		return dma_fence_is_signaled(f1) ? NULL : f1;
	else
		return dma_fence_is_signaled(f2) ? NULL : f2;
}

/**
 * dma_fence_get_status_locked - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence (to indicate whether the fence was completed due to an error
 * rather than success). The value of the status condition is only valid
 * if the fence has been signaled; dma_fence_get_status_locked() first checks
 * the signal state before reporting the error status.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed in err.
 */
static inline int dma_fence_get_status_locked(struct dma_fence *fence)
{
	if (dma_fence_is_signaled_locked(fence))
		return fence->error ?: 1;
	else
		return 0;
}

int dma_fence_get_status(struct dma_fence *fence);

/**
 * dma_fence_set_error - flag an error condition on the fence
 * @fence: the dma_fence
 * @error: the error to store
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence, to indicate that the fence was completed due to an error
 * rather than success. This must be set before signaling (so that the value
 * is visible before any waiters on the signal callback are woken). This
 * helper exists to help catch erroneous setting of &dma_fence.error.
 */
static inline void dma_fence_set_error(struct dma_fence *fence,
				       int error)
{
	WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
	WARN_ON(error >= 0 || error < -MAX_ERRNO);

	fence->error = error;
}
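
/*
 * Example (illustrative sketch): flagging a failed operation. The error must
 * be set before the fence is signaled; waiters can then query it with
 * dma_fence_get_status(), which folds the signaled state and the error into
 * one value (0 = pending, 1 = success, <0 = failure). "dev" is hypothetical.
 *
 * Signaling side, e.g. after detecting a hardware hang:
 *
 *	dma_fence_set_error(fence, -EIO);
 *	dma_fence_signal(fence);
 *
 * Waiting side:
 *
 *	int status = dma_fence_get_status(fence);
 *
 *	if (status < 0)
 *		dev_err(dev, "fence completed with error %d\n", status);
 */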

/**
 * dma_fence_timestamp - helper to get the completion timestamp of a fence
 * @fence: fence to get the timestamp from.
 *
 * After a fence is signaled the timestamp is updated with the signaling time,
 * but setting the timestamp can race with tasks waiting for the signaling. This
 * helper busy waits for the correct timestamp to appear.
 */
static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
{
	if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
		return ktime_get();

	while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
		cpu_relax();

	return fence->timestamp;
}

signed long dma_fence_wait_timeout(struct dma_fence *,
				   bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
				       uint32_t count,
				       bool intr, signed long timeout,
				       uint32_t *idx);
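
/*
 * Example (illustrative sketch): a bounded wait. The timeout is given in
 * jiffies; 0 means the wait timed out, a negative value is an error (e.g.
 * -ERESTARTSYS for an interrupted interruptible wait), and a positive value
 * is the number of jiffies remaining when the fence signaled.
 *
 *	signed long ret;
 *
 *	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */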

/**
 * dma_fence_wait - sleep until the fence gets signaled
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 *
 * This function will return -ERESTARTSYS if interrupted by a signal,
 * or 0 if the fence was signaled. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
 */
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
	signed long ret;

	/* Since dma_fence_wait_timeout cannot timeout with
	 * MAX_SCHEDULE_TIMEOUT, only valid return values are
	 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
	 */
	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	return ret < 0 ? ret : 0;
}

void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);

struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
u64 dma_fence_context_alloc(unsigned num);

extern const struct dma_fence_ops dma_fence_array_ops;
extern const struct dma_fence_ops dma_fence_chain_ops;

/**
 * dma_fence_is_array - check if a fence is from the array subclass
 * @fence: the fence to test
 *
 * Return true if it is a dma_fence_array and false otherwise.
 */
static inline bool dma_fence_is_array(struct dma_fence *fence)
{
	return fence->ops == &dma_fence_array_ops;
}

/**
 * dma_fence_is_chain - check if a fence is from the chain subclass
 * @fence: the fence to test
 *
 * Return true if it is a dma_fence_chain and false otherwise.
 */
static inline bool dma_fence_is_chain(struct dma_fence *fence)
{
	return fence->ops == &dma_fence_chain_ops;
}

/**
 * dma_fence_is_container - check if a fence is a container for other fences
 * @fence: the fence to test
 *
 * Return true if this fence is a container for other fences, false otherwise.
 * This is important since we can't build up large fence structures, as
 * otherwise we run into recursion during operations on those fences.
 */
static inline bool dma_fence_is_container(struct dma_fence *fence)
{
	return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}

#endif /* __LINUX_DMA_FENCE_H */