1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * The contents of this file are private to DMA engine drivers, and is not
4 * part of the API to be used by DMA engine users.
5 */
6#ifndef DMAENGINE_H
7#define DMAENGINE_H
8
9#include <linux/bug.h>
10#include <linux/dmaengine.h>
11
12/**
13 * dma_cookie_init - initialize the cookies for a DMA channel
14 * @chan: dma channel to initialize
15 */
16static inline void dma_cookie_init(struct dma_chan *chan)
17{
18 chan->cookie = DMA_MIN_COOKIE;
19 chan->completed_cookie = DMA_MIN_COOKIE;
20}
21
22/**
23 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
24 * @tx: descriptor needing cookie
25 *
26 * Assign a unique non-zero per-channel cookie to the descriptor.
27 * Note: caller is expected to hold a lock to prevent concurrency.
28 */
29static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
30{
31 struct dma_chan *chan = tx->chan;
32 dma_cookie_t cookie;
33
34 cookie = chan->cookie + 1;
35 if (cookie < DMA_MIN_COOKIE)
36 cookie = DMA_MIN_COOKIE;
37 tx->cookie = chan->cookie = cookie;
38
39 return cookie;
40}
41
42/**
43 * dma_cookie_complete - complete a descriptor
44 * @tx: descriptor to complete
45 *
46 * Mark this descriptor complete by updating the channels completed
47 * cookie marker. Zero the descriptors cookie to prevent accidental
48 * repeated completions.
49 *
50 * Note: caller is expected to hold a lock to prevent concurrency.
51 */
52static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
53{
54 BUG_ON(tx->cookie < DMA_MIN_COOKIE);
55 tx->chan->completed_cookie = tx->cookie;
56 tx->cookie = 0;
57}
58
59/**
60 * dma_cookie_status - report cookie status
61 * @chan: dma channel
62 * @cookie: cookie we are interested in
63 * @state: dma_tx_state structure to return last/used cookies
64 *
65 * Report the status of the cookie, filling in the state structure if
66 * non-NULL. No locking is required.
67 */
68static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
69 dma_cookie_t cookie, struct dma_tx_state *state)
70{
71 dma_cookie_t used, complete;
72
73 used = chan->cookie;
74 complete = chan->completed_cookie;
75 barrier();
76 if (state) {
77 state->last = complete;
78 state->used = used;
79 state->residue = 0;
80 }
81 return dma_async_is_complete(cookie, complete, used);
82}
83
84static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
85{
86 if (state)
87 state->residue = residue;
88}
89
/**
 * struct dmaengine_desc_callback - completion callback info snapshotted
 *	from a descriptor (see dmaengine_desc_get_callback())
 * @callback: completion callback that takes only @callback_param
 * @callback_result: completion callback that additionally receives a
 *	struct dmaengine_result; preferred over @callback when set
 * @callback_param: opaque pointer handed to whichever callback runs
 */
struct dmaengine_desc_callback {
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
};
95
96/**
97 * dmaengine_desc_get_callback - get the passed in callback function
98 * @tx: tx descriptor
99 * @cb: temp struct to hold the callback info
100 *
101 * Fill the passed in cb struct with what's available in the passed in
102 * tx descriptor struct
103 * No locking is required.
104 */
105static inline void
106dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
107 struct dmaengine_desc_callback *cb)
108{
109 cb->callback = tx->callback;
110 cb->callback_result = tx->callback_result;
111 cb->callback_param = tx->callback_param;
112}
113
114/**
115 * dmaengine_desc_callback_invoke - call the callback function in cb struct
116 * @cb: temp struct that is holding the callback info
117 * @result: transaction result
118 *
119 * Call the callback function provided in the cb struct with the parameter
120 * in the cb struct.
121 * Locking is dependent on the driver.
122 */
123static inline void
124dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
125 const struct dmaengine_result *result)
126{
127 struct dmaengine_result dummy_result = {
128 .result = DMA_TRANS_NOERROR,
129 .residue = 0
130 };
131
132 if (cb->callback_result) {
133 if (!result)
134 result = &dummy_result;
135 cb->callback_result(cb->callback_param, result);
136 } else if (cb->callback) {
137 cb->callback(cb->callback_param);
138 }
139}
140
141/**
142 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
143 * then immediately call the callback.
144 * @tx: dma async tx descriptor
145 * @result: transaction result
146 *
147 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
148 * in a single function since no work is necessary in between for the driver.
149 * Locking is dependent on the driver.
150 */
151static inline void
152dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
153 const struct dmaengine_result *result)
154{
155 struct dmaengine_desc_callback cb;
156
157 dmaengine_desc_get_callback(tx, &cb);
158 dmaengine_desc_callback_invoke(&cb, result);
159}
160
161/**
162 * dmaengine_desc_callback_valid - verify the callback is valid in cb
163 * @cb: callback info struct
164 *
165 * Return a bool that verifies whether callback in cb is valid or not.
166 * No locking is required.
167 */
168static inline bool
169dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
170{
171 return (cb->callback) ? true : false;
172}
173
174#endif
175