// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2020, Linaro Limited
 */

#include <dt-bindings/dma/qcom-gpi.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "../dmaengine.h"
#include "../virt-dma.h"

#define TRE_TYPE_DMA		0x10
#define TRE_TYPE_GO		0x20
#define TRE_TYPE_CONFIG0	0x22

/* TRE flags */
#define TRE_FLAGS_CHAIN		BIT(0)
#define TRE_FLAGS_IEOB		BIT(8)
#define TRE_FLAGS_IEOT		BIT(9)
#define TRE_FLAGS_BEI		BIT(10)
#define TRE_FLAGS_LINK		BIT(11)
#define TRE_FLAGS_TYPE		GENMASK(23, 16)

/* SPI CONFIG0 WD0 */
#define TRE_SPI_C0_WORD_SZ	GENMASK(4, 0)
#define TRE_SPI_C0_LOOPBACK	BIT(8)
#define TRE_SPI_C0_CS		BIT(11)
#define TRE_SPI_C0_CPHA		BIT(12)
#define TRE_SPI_C0_CPOL		BIT(13)
#define TRE_SPI_C0_TX_PACK	BIT(24)
#define TRE_SPI_C0_RX_PACK	BIT(25)

/* CONFIG0 WD2 */
#define TRE_C0_CLK_DIV		GENMASK(11, 0)
#define TRE_C0_CLK_SRC		GENMASK(19, 16)

/* SPI GO WD0 */
#define TRE_SPI_GO_CMD		GENMASK(4, 0)
#define TRE_SPI_GO_CS		GENMASK(10, 8)
#define TRE_SPI_GO_FRAG		BIT(26)

/* GO WD2 */
#define TRE_RX_LEN		GENMASK(23, 0)

/* I2C Config0 WD0 */
#define TRE_I2C_C0_TLOW		GENMASK(7, 0)
#define TRE_I2C_C0_THIGH	GENMASK(15, 8)
#define TRE_I2C_C0_TCYL		GENMASK(23, 16)
#define TRE_I2C_C0_TX_PACK	BIT(24)
#define TRE_I2C_C0_RX_PACK	BIT(25)

/* I2C GO WD0 */
#define TRE_I2C_GO_CMD		GENMASK(4, 0)
#define TRE_I2C_GO_ADDR		GENMASK(14, 8)
#define TRE_I2C_GO_STRETCH	BIT(26)

/* DMA TRE */
#define TRE_DMA_LEN		GENMASK(23, 0)

/* Register offsets from gpi-top */
#define GPII_n_CH_k_CNTXT_0_OFFS(n, k)	(0x20000 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_CNTXT_0_EL_SIZE	GENMASK(31, 24)
#define GPII_n_CH_k_CNTXT_0_CHSTATE	GENMASK(23, 20)
#define GPII_n_CH_k_CNTXT_0_ERIDX	GENMASK(18, 14)
#define GPII_n_CH_k_CNTXT_0_DIR		BIT(3)
#define GPII_n_CH_k_CNTXT_0_PROTO	GENMASK(2, 0)

#define GPII_n_CH_k_CNTXT_0(el_size, erindex, dir, chtype_proto)	\
	(FIELD_PREP(GPII_n_CH_k_CNTXT_0_EL_SIZE, el_size)	|	\
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_ERIDX, erindex)		|	\
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_DIR, dir)		|	\
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_PROTO, chtype_proto))
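
/*
 * Illustrative sketch (not part of the driver): composing CNTXT_0 for a
 * ring of 16-byte elements, event-ring index 0, an outbound channel and the
 * GPI protocol. The field macros above place each value in its bit range:
 *
 *	u32 cntxt0 = GPII_n_CH_k_CNTXT_0(0x10, 0, GPI_CHTYPE_DIR_OUT,
 *					 GPI_CHTYPE_PROTO_GPI);
 *	// EL_SIZE=0x10 in bits 31:24, DIR=1 in bit 3, PROTO=2 in bits 2:0,
 *	// i.e. cntxt0 == 0x1000000a
 */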

#define GPI_CHTYPE_DIR_IN	(0)
#define GPI_CHTYPE_DIR_OUT	(1)

#define GPI_CHTYPE_PROTO_GPI	(0x2)

#define GPII_n_CH_k_DOORBELL_0_OFFS(n, k)	(0x22000 + (0x4000 * (n)) + (0x8 * (k)))
#define GPII_n_CH_CMD_OFFS(n)			(0x23008 + (0x4000 * (n)))
#define GPII_n_CH_CMD_OPCODE			GENMASK(31, 24)
#define GPII_n_CH_CMD_CHID			GENMASK(7, 0)
#define GPII_n_CH_CMD(opcode, chid)	\
	(FIELD_PREP(GPII_n_CH_CMD_OPCODE, opcode) |	\
	 FIELD_PREP(GPII_n_CH_CMD_CHID, chid))

#define GPII_n_CH_CMD_ALLOCATE		(0)
#define GPII_n_CH_CMD_START		(1)
#define GPII_n_CH_CMD_STOP		(2)
#define GPII_n_CH_CMD_RESET		(9)
#define GPII_n_CH_CMD_DE_ALLOC		(10)
#define GPII_n_CH_CMD_UART_SW_STALE	(32)
#define GPII_n_CH_CMD_UART_RFR_READY	(33)
#define GPII_n_CH_CMD_UART_RFR_NOT_READY (34)

/* EV Context Array */
#define GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k)	(0x21000 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_EV_k_CNTXT_0_EL_SIZE	GENMASK(31, 24)
#define GPII_n_EV_k_CNTXT_0_CHSTATE	GENMASK(23, 20)
#define GPII_n_EV_k_CNTXT_0_INTYPE	BIT(16)
#define GPII_n_EV_k_CNTXT_0_CHTYPE	GENMASK(3, 0)

#define GPII_n_EV_k_CNTXT_0(el_size, inttype, chtype)	\
	(FIELD_PREP(GPII_n_EV_k_CNTXT_0_EL_SIZE, el_size) |	\
	 FIELD_PREP(GPII_n_EV_k_CNTXT_0_INTYPE, inttype)  |	\
	 FIELD_PREP(GPII_n_EV_k_CNTXT_0_CHTYPE, chtype))

#define GPI_INTTYPE_IRQ		(1)
#define GPI_CHTYPE_GPI_EV	(0x2)

enum CNTXT_OFFS {
	CNTXT_0_CONFIG = 0x0,
	CNTXT_1_R_LENGTH = 0x4,
	CNTXT_2_RING_BASE_LSB = 0x8,
	CNTXT_3_RING_BASE_MSB = 0xC,
	CNTXT_4_RING_RP_LSB = 0x10,
	CNTXT_5_RING_RP_MSB = 0x14,
	CNTXT_6_RING_WP_LSB = 0x18,
	CNTXT_7_RING_WP_MSB = 0x1C,
	CNTXT_8_RING_INT_MOD = 0x20,
	CNTXT_9_RING_INTVEC = 0x24,
	CNTXT_10_RING_MSI_LSB = 0x28,
	CNTXT_11_RING_MSI_MSB = 0x2C,
	CNTXT_12_RING_RP_UPDATE_LSB = 0x30,
	CNTXT_13_RING_RP_UPDATE_MSB = 0x34,
};

#define GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k)	(0x22100 + (0x4000 * (n)) + (0x8 * (k)))
#define GPII_n_EV_CH_CMD_OFFS(n)	(0x23010 + (0x4000 * (n)))
#define GPII_n_EV_CMD_OPCODE		GENMASK(31, 24)
#define GPII_n_EV_CMD_CHID		GENMASK(7, 0)
#define GPII_n_EV_CMD(opcode, chid)	\
	(FIELD_PREP(GPII_n_EV_CMD_OPCODE, opcode) |	\
	 FIELD_PREP(GPII_n_EV_CMD_CHID, chid))

#define GPII_n_EV_CH_CMD_ALLOCATE	(0x00)
#define GPII_n_EV_CH_CMD_RESET		(0x09)
#define GPII_n_EV_CH_CMD_DE_ALLOC	(0x0A)

#define GPII_n_CNTXT_TYPE_IRQ_OFFS(n)	(0x23080 + (0x4000 * (n)))

/* mask type register */
#define GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n)	(0x23088 + (0x4000 * (n)))
#define GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK		GENMASK(6, 0)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL	BIT(6)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB		BIT(3)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB		BIT(2)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL	BIT(1)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL	BIT(0)

#define GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n)	(0x23090 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n)	(0x23094 + (0x4000 * (n)))

/* Mask channel control interrupt register */
#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n)	(0x23098 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK	GENMASK(1, 0)

/* Mask event control interrupt register */
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n)	(0x2309C + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK	BIT(0)

#define GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n)	(0x230A0 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n)	(0x230A4 + (0x4000 * (n)))

/* Mask event interrupt register */
#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n)	(0x230B8 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK	BIT(0)

#define GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n)	(0x230C0 + (0x4000 * (n)))
#define GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n)	(0x23100 + (0x4000 * (n)))
#define GPI_GLOB_IRQ_ERROR_INT_MSK		BIT(0)

/* GPII specific Global - Enable bit register */
#define GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n)	(0x23108 + (0x4000 * (n)))
#define GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n)	(0x23110 + (0x4000 * (n)))
#define GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n)	(0x23118 + (0x4000 * (n)))

/* GPII general interrupt - Enable bit register */
#define GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n)	(0x23120 + (0x4000 * (n)))
#define GPII_n_CNTXT_GPII_IRQ_EN_BMSK		GENMASK(3, 0)

#define GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n)	(0x23128 + (0x4000 * (n)))

/* GPII Interrupt Type register */
#define GPII_n_CNTXT_INTSET_OFFS(n)	(0x23180 + (0x4000 * (n)))
#define GPII_n_CNTXT_INTSET_BMSK	BIT(0)

#define GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n)	(0x23188 + (0x4000 * (n)))
#define GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n)	(0x2318C + (0x4000 * (n)))
#define GPII_n_CNTXT_SCRATCH_0_OFFS(n)		(0x23400 + (0x4000 * (n)))
#define GPII_n_CNTXT_SCRATCH_1_OFFS(n)		(0x23404 + (0x4000 * (n)))

#define GPII_n_ERROR_LOG_OFFS(n)	(0x23200 + (0x4000 * (n)))

/* QOS Registers */
#define GPII_n_CH_k_QOS_OFFS(n, k)	(0x2005C + (0x4000 * (n)) + (0x80 * (k)))

/* Scratch registers */
#define GPII_n_CH_k_SCRATCH_0_OFFS(n, k)	(0x20060 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_0_SEID	GENMASK(2, 0)
#define GPII_n_CH_k_SCRATCH_0_PROTO	GENMASK(7, 4)
#define GPII_n_CH_k_SCRATCH_0_PAIR	GENMASK(20, 16)
#define GPII_n_CH_k_SCRATCH_0(pair, proto, seid)	\
	(FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PAIR, pair)	|	\
	 FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PROTO, proto) |	\
	 FIELD_PREP(GPII_n_CH_k_SCRATCH_0_SEID, seid))
#define GPII_n_CH_k_SCRATCH_1_OFFS(n, k)	(0x20064 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_2_OFFS(n, k)	(0x20068 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_3_OFFS(n, k)	(0x2006C + (0x4000 * (n)) + (0x80 * (k)))
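
/*
 * Illustrative sketch (not part of the driver): pairing a TX channel
 * (chid 0) with its RX sibling (chid 1) for the SPI protocol on serial
 * engine 0 would program SCRATCH_0 roughly as:
 *
 *	gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, 0),
 *		      GPII_n_CH_k_SCRATCH_0(1, QCOM_GPI_SPI, 0));
 *
 * QCOM_GPI_SPI is the protocol value from dt-bindings/dma/qcom-gpi.h.
 */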

struct __packed gpi_tre {
	u32 dword[4];
};

enum msm_gpi_tce_code {
	MSM_GPI_TCE_SUCCESS = 1,
	MSM_GPI_TCE_EOT = 2,
	MSM_GPI_TCE_EOB = 4,
	MSM_GPI_TCE_UNEXP_ERR = 16,
};

#define CMD_TIMEOUT_MS		(250)

#define MAX_CHANNELS_PER_GPII	(2)
#define GPI_TX_CHAN		(0)
#define GPI_RX_CHAN		(1)
#define STATE_IGNORE		(U32_MAX)
#define EV_FACTOR		(2)
#define REQ_OF_DMA_ARGS		(5) /* # of arguments required from client */
#define CHAN_TRES		64

struct __packed xfer_compl_event {
	u64 ptr;
	u32 length:24;
	u8 code;
	u16 status;
	u8 type;
	u8 chid;
};

struct __packed immediate_data_event {
	u8 data_bytes[8];
	u8 length:4;
	u8 resvd:4;
	u16 tre_index;
	u8 code;
	u16 status;
	u8 type;
	u8 chid;
};

struct __packed qup_notif_event {
	u32 status;
	u32 time;
	u32 count:24;
	u8 resvd;
	u16 resvd1;
	u8 type;
	u8 chid;
};

struct __packed gpi_ere {
	u32 dword[4];
};

enum GPI_EV_TYPE {
	XFER_COMPLETE_EV_TYPE = 0x22,
	IMMEDIATE_DATA_EV_TYPE = 0x30,
	QUP_NOTIF_EV_TYPE = 0x31,
	STALE_EV_TYPE = 0xFF,
};

union __packed gpi_event {
	struct __packed xfer_compl_event xfer_compl_event;
	struct __packed immediate_data_event immediate_data_event;
	struct __packed qup_notif_event qup_notif_event;
	struct __packed gpi_ere gpi_ere;
};

enum gpii_irq_settings {
	DEFAULT_IRQ_SETTINGS,
	MASK_IEOB_SETTINGS,
};

enum gpi_ev_state {
	DEFAULT_EV_CH_STATE = 0,
	EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
	EV_STATE_ALLOCATED,
	MAX_EV_STATES
};

static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
	[EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
	[EV_STATE_ALLOCATED] = "ALLOCATED",
};

#define TO_GPI_EV_STATE_STR(_state) (((_state) >= MAX_EV_STATES) ? \
				     "INVALID" : gpi_ev_state_str[(_state)])

enum gpi_ch_state {
	DEFAULT_CH_STATE = 0x0,
	CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
	CH_STATE_ALLOCATED = 0x1,
	CH_STATE_STARTED = 0x2,
	CH_STATE_STOPPED = 0x3,
	CH_STATE_STOP_IN_PROC = 0x4,
	CH_STATE_ERROR = 0xf,
	MAX_CH_STATES
};

enum gpi_cmd {
	GPI_CH_CMD_BEGIN,
	GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
	GPI_CH_CMD_START,
	GPI_CH_CMD_STOP,
	GPI_CH_CMD_RESET,
	GPI_CH_CMD_DE_ALLOC,
	GPI_CH_CMD_UART_SW_STALE,
	GPI_CH_CMD_UART_RFR_READY,
	GPI_CH_CMD_UART_RFR_NOT_READY,
	GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
	GPI_EV_CMD_BEGIN,
	GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
	GPI_EV_CMD_RESET,
	GPI_EV_CMD_DEALLOC,
	GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
	GPI_MAX_CMD,
};

#define IS_CHAN_CMD(_cmd)	((_cmd) <= GPI_CH_CMD_END)

static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
	[GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
	[GPI_CH_CMD_START] = "CH START",
	[GPI_CH_CMD_STOP] = "CH STOP",
	[GPI_CH_CMD_RESET] = "CH_RESET",
	[GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
	[GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
	[GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
	[GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
	[GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
	[GPI_EV_CMD_RESET] = "EV RESET",
	[GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
};

#define TO_GPI_CMD_STR(_cmd) (((_cmd) >= GPI_MAX_CMD) ? "INVALID" : \
			      gpi_cmd_str[(_cmd)])

/*
 * @DISABLE_STATE: no register access allowed
 * @CONFIG_STATE: client has configured the channel
 * @PREPARE_HARDWARE: register access is allowed,
 *		      however no events are processed
 * @ACTIVE_STATE: channels are fully operational
 * @PREPARE_TERMINATE: graceful termination of channels,
 *		       register access is allowed
 * @PAUSE_STATE: channels are active, but not processing any events
 */
enum gpi_pm_state {
	DISABLE_STATE,
	CONFIG_STATE,
	PREPARE_HARDWARE,
	ACTIVE_STATE,
	PREPARE_TERMINATE,
	PAUSE_STATE,
	MAX_PM_STATE
};

#define REG_ACCESS_VALID(_pm_state)	((_pm_state) >= PREPARE_HARDWARE)
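
/*
 * Illustrative note (not part of the original source): DISABLE and CONFIG
 * are the only states below PREPARE_HARDWARE in the enum, so
 * REG_ACCESS_VALID() is false exactly while the hardware may still be
 * unclocked or unconfigured.
 */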

static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
	[DISABLE_STATE] = "DISABLE",
	[CONFIG_STATE] = "CONFIG",
	[PREPARE_HARDWARE] = "PREPARE HARDWARE",
	[ACTIVE_STATE] = "ACTIVE",
	[PREPARE_TERMINATE] = "PREPARE TERMINATE",
	[PAUSE_STATE] = "PAUSE",
};

#define TO_GPI_PM_STR(_state) (((_state) >= MAX_PM_STATE) ? \
			       "INVALID" : gpi_pm_state_str[(_state)])

static const struct {
	enum gpi_cmd gpi_cmd;
	u32 opcode;
	u32 state;
} gpi_cmd_info[GPI_MAX_CMD] = {
	{
		GPI_CH_CMD_ALLOCATE,
		GPII_n_CH_CMD_ALLOCATE,
		CH_STATE_ALLOCATED,
	},
	{
		GPI_CH_CMD_START,
		GPII_n_CH_CMD_START,
		CH_STATE_STARTED,
	},
	{
		GPI_CH_CMD_STOP,
		GPII_n_CH_CMD_STOP,
		CH_STATE_STOPPED,
	},
	{
		GPI_CH_CMD_RESET,
		GPII_n_CH_CMD_RESET,
		CH_STATE_ALLOCATED,
	},
	{
		GPI_CH_CMD_DE_ALLOC,
		GPII_n_CH_CMD_DE_ALLOC,
		CH_STATE_NOT_ALLOCATED,
	},
	{
		GPI_CH_CMD_UART_SW_STALE,
		GPII_n_CH_CMD_UART_SW_STALE,
		STATE_IGNORE,
	},
	{
		GPI_CH_CMD_UART_RFR_READY,
		GPII_n_CH_CMD_UART_RFR_READY,
		STATE_IGNORE,
	},
	{
		GPI_CH_CMD_UART_RFR_NOT_READY,
		GPII_n_CH_CMD_UART_RFR_NOT_READY,
		STATE_IGNORE,
	},
	{
		GPI_EV_CMD_ALLOCATE,
		GPII_n_EV_CH_CMD_ALLOCATE,
		EV_STATE_ALLOCATED,
	},
	{
		GPI_EV_CMD_RESET,
		GPII_n_EV_CH_CMD_RESET,
		EV_STATE_ALLOCATED,
	},
	{
		GPI_EV_CMD_DEALLOC,
		GPII_n_EV_CH_CMD_DE_ALLOC,
		EV_STATE_NOT_ALLOCATED,
	},
};

struct gpi_ring {
	void *pre_aligned;
	size_t alloc_size;
	phys_addr_t phys_addr;
	dma_addr_t dma_handle;
	void *base;
	void *wp;
	void *rp;
	u32 len;
	u32 el_size;
	u32 elements;
	bool configured;
};

struct gpi_dev {
	struct dma_device dma_device;
	struct device *dev;
	struct resource *res;
	void __iomem *regs;
	void __iomem *ee_base; /* ee register base address */
	u32 max_gpii;	/* maximum # of gpii instances available per gpi block */
	u32 gpii_mask;	/* gpii instances available for apps */
	u32 ev_factor;	/* ev ring length factor */
	struct gpii *gpiis;
};

struct reg_info {
	char *name;
	u32 offset;
	u32 val;
};

struct gchan {
	struct virt_dma_chan vc;
	u32 chid;
	u32 seid;
	u32 protocol;
	struct gpii *gpii;
	enum gpi_ch_state ch_state;
	enum gpi_pm_state pm_state;
	void __iomem *ch_cntxt_base_reg;
	void __iomem *ch_cntxt_db_reg;
	void __iomem *ch_cmd_reg;
	u32 dir;
	struct gpi_ring ch_ring;
	void *config;
};

struct gpii {
	u32 gpii_id;
	struct gchan gchan[MAX_CHANNELS_PER_GPII];
	struct gpi_dev *gpi_dev;
	int irq;
	void __iomem *regs; /* points to gpi top */
	void __iomem *ev_cntxt_base_reg;
	void __iomem *ev_cntxt_db_reg;
	void __iomem *ev_ring_rp_lsb_reg;
	void __iomem *ev_cmd_reg;
	void __iomem *ieob_clr_reg;
	struct mutex ctrl_lock;
	enum gpi_ev_state ev_state;
	bool configured_irq;
	enum gpi_pm_state pm_state;
	rwlock_t pm_lock;
	struct gpi_ring ev_ring;
	struct tasklet_struct ev_task; /* event processing tasklet */
	struct completion cmd_completion;
	enum gpi_cmd gpi_cmd;
	u32 cntxt_type_irq_msk;
	bool ieob_set;
};

#define MAX_TRE 3

struct gpi_desc {
	struct virt_dma_desc vd;
	size_t len;
	void *db; /* DB register to program */
	struct gchan *gchan;
	struct gpi_tre tre[MAX_TRE];
	u32 num_tre;
};

static const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
	GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
};

static irqreturn_t gpi_handle_irq(int irq, void *data);
static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
static void gpi_process_events(struct gpii *gpii);

static inline struct gchan *to_gchan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct gchan, vc.chan);
}

static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct gpi_desc, vd);
}

static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
				      void *addr)
{
	return ring->phys_addr + (addr - ring->base);
}

static inline void *to_virtual(const struct gpi_ring *const ring, phys_addr_t addr)
{
	return ring->base + (addr - ring->phys_addr);
}
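
/*
 * Illustrative sketch (not part of the driver): to_physical() and
 * to_virtual() are inverses over a ring's contiguous mapping. With
 * ring->phys_addr = 0x1000 and ring->base = vbase:
 *
 *	void *el = vbase + 0x40;
 *	phys_addr_t pa = to_physical(ring, el);	// 0x1040
 *	// to_virtual(ring, pa) == el, i.e. the translation round-trips
 */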

static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
{
	return readl_relaxed(addr);
}

static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);
}

/* gpi_write_reg_field - write to specific bit field */
static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
				       u32 mask, u32 shift, u32 val)
{
	u32 tmp = gpi_read_reg(gpii, addr);

	tmp &= ~mask;
	val = tmp | ((val << shift) & mask);
	gpi_write_reg(gpii, addr, val);
}

static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
	void __iomem *addr = gpii->regs + offset;
	u32 tmp = gpi_read_reg(gpii, addr);

	tmp &= ~mask;
	tmp |= u32_encode_bits(val, mask);

	gpi_write_reg(gpii, addr, tmp);
}
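
/*
 * Illustrative sketch (not part of the driver): gpi_update_reg() is a
 * read-modify-write of a single field. Setting only the IEOB bit of the
 * type-IRQ mask while preserving the remaining mask bits would look like:
 *
 *	gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
 *		       GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB, 1);
 */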

static void gpi_disable_interrupts(struct gpii *gpii)
{
	gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_INTSET_BMSK, 0);

	gpii->cntxt_type_irq_msk = 0;
	devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
	gpii->configured_irq = false;
}

/* configure and enable interrupts */
static int gpi_config_interrupts(struct gpii *gpii, enum gpii_irq_settings settings, bool mask)
{
	const u32 enable = (GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
			    GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
			    GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
			    GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
			    GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
	int ret;

	if (!gpii->configured_irq) {
		ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
				       gpi_handle_irq, IRQF_TRIGGER_HIGH,
				       "gpi-dma", gpii);
		if (ret < 0) {
			dev_err(gpii->gpi_dev->dev, "error request irq:%d ret:%d\n",
				gpii->irq, ret);
			return ret;
		}
	}

	if (settings == MASK_IEOB_SETTINGS) {
		/*
		 * GPII only uses one EV ring per gpii so we can globally
		 * enable/disable the IEOB interrupt
		 */
		if (mask)
			gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
		else
			gpii->cntxt_type_irq_msk &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
		gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
			       GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, gpii->cntxt_type_irq_msk);
	} else {
		gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
			       GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, enable);
		gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
			       GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
			       GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK);
		gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
			       GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
			       GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK);
		gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
			       GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
			       GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK);
		gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
			       GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
			       GPII_n_CNTXT_GPII_IRQ_EN_BMSK);
		gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
			       GPII_n_CNTXT_GPII_IRQ_EN_BMSK, GPII_n_CNTXT_GPII_IRQ_EN_BMSK);
		gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii->gpii_id), U32_MAX, 0);
		gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii->gpii_id), U32_MAX, 0);
		gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id), U32_MAX, 0);
		gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii->gpii_id), U32_MAX, 0);
		gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
			       GPII_n_CNTXT_INTSET_BMSK, 1);
		gpi_update_reg(gpii, GPII_n_ERROR_LOG_OFFS(gpii->gpii_id), U32_MAX, 0);

		gpii->cntxt_type_irq_msk = enable;
	}

	gpii->configured_irq = true;
	return 0;
}

/* Sends gpii event or channel command */
static int gpi_send_cmd(struct gpii *gpii, struct gchan *gchan,
			enum gpi_cmd gpi_cmd)
{
	u32 chid = MAX_CHANNELS_PER_GPII;
	unsigned long timeout;
	void __iomem *cmd_reg;
	u32 cmd;

	if (gpi_cmd >= GPI_MAX_CMD)
		return -EINVAL;
	if (IS_CHAN_CMD(gpi_cmd))
		chid = gchan->chid;

	dev_dbg(gpii->gpi_dev->dev,
		"sending cmd: %s:%u\n", TO_GPI_CMD_STR(gpi_cmd), chid);

	/* send opcode and wait for completion */
	reinit_completion(&gpii->cmd_completion);
	gpii->gpi_cmd = gpi_cmd;

	cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gchan->ch_cmd_reg : gpii->ev_cmd_reg;
	cmd = IS_CHAN_CMD(gpi_cmd) ? GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
				     GPII_n_EV_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
	gpi_write_reg(gpii, cmd_reg, cmd);
	timeout = wait_for_completion_timeout(&gpii->cmd_completion,
					      msecs_to_jiffies(CMD_TIMEOUT_MS));
	if (!timeout) {
		dev_err(gpii->gpi_dev->dev, "cmd: %s completion timeout:%u\n",
			TO_GPI_CMD_STR(gpi_cmd), chid);
		return -EIO;
	}

	/* confirm the new ch state is correct, if the cmd is a state change cmd */
	if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
		return 0;

	if (IS_CHAN_CMD(gpi_cmd) && gchan->ch_state == gpi_cmd_info[gpi_cmd].state)
		return 0;

	if (!IS_CHAN_CMD(gpi_cmd) && gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
		return 0;

	return -EIO;
}
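
/*
 * Illustrative sketch (not part of the driver): callers issue a state-change
 * command and rely on the IRQ path to update ch_state/ev_state and complete
 * cmd_completion before the timeout expires, e.g.:
 *
 *	ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_START);
 *	if (ret)
 *		return ret;	// timed out, or the state did not change
 */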

/* program transfer ring DB register */
static inline void gpi_write_ch_db(struct gchan *gchan,
				   struct gpi_ring *ring, void *wp)
{
	struct gpii *gpii = gchan->gpii;
	phys_addr_t p_wp;

	p_wp = to_physical(ring, wp);
	gpi_write_reg(gpii, gchan->ch_cntxt_db_reg, p_wp);
}

/* program event ring DB register */
static inline void gpi_write_ev_db(struct gpii *gpii,
				   struct gpi_ring *ring, void *wp)
{
	phys_addr_t p_wp;

	p_wp = ring->phys_addr + (wp - ring->base);
	gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, p_wp);
}

/* process transfer completion interrupt */
static void gpi_process_ieob(struct gpii *gpii)
{
	gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));

	gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
	tasklet_hi_schedule(&gpii->ev_task);
}

/* process channel control interrupt */
static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
{
	u32 gpii_id = gpii->gpii_id;
	u32 offset = GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
	u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
	struct gchan *gchan;
	u32 chid, state;

	/* clear the status */
	offset = GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
	gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);

	for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
		if (!(BIT(chid) & ch_irq))
			continue;

		gchan = &gpii->gchan[chid];
		state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg +
				     CNTXT_0_CONFIG);
		state = FIELD_GET(GPII_n_CH_k_CNTXT_0_CHSTATE, state);

		/*
		 * The CH_CMD_DE_ALLOC cmd is always successful. However, the
		 * cmd does not change the hardware status, so overwrite the
		 * software state with the default state.
		 */
		if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
			state = DEFAULT_CH_STATE;
		gchan->ch_state = state;

		/*
		 * Trigger complete_all only if ch_state is not stop-in-process.
		 * Stop-in-process is a transition state; wait for the stop
		 * interrupt before notifying.
		 */
		if (gchan->ch_state != CH_STATE_STOP_IN_PROC)
			complete_all(&gpii->cmd_completion);
	}
}

/* processing gpi general error interrupts */
static void gpi_process_gen_err_irq(struct gpii *gpii)
{
	u32 gpii_id = gpii->gpii_id;
	u32 offset = GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id);
	u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);

	/* log the status */
	dev_dbg(gpii->gpi_dev->dev, "irq_stts:0x%x\n", irq_stts);

	/* clear the register */
	offset = GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id);
	gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
}

/* processing gpi level error interrupts */
static void gpi_process_glob_err_irq(struct gpii *gpii)
{
	u32 gpii_id = gpii->gpii_id;
	u32 offset = GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
	u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);

	offset = GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
	gpi_write_reg(gpii, gpii->regs + offset, irq_stts);

	/* only the error interrupt should be set */
	if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
		dev_err(gpii->gpi_dev->dev, "invalid error status:0x%x\n", irq_stts);
		return;
	}

	offset = GPII_n_ERROR_LOG_OFFS(gpii_id);
	gpi_write_reg(gpii, gpii->regs + offset, 0);
}

/* gpii interrupt handler */
static irqreturn_t gpi_handle_irq(int irq, void *data)
{
	struct gpii *gpii = data;
	u32 gpii_id = gpii->gpii_id;
	u32 type, offset;
	unsigned long flags;

	read_lock_irqsave(&gpii->pm_lock, flags);

	/*
	 * States are out of sync if we receive an interrupt while the
	 * software state is DISABLE; bail out.
	 */
	if (!REG_ACCESS_VALID(gpii->pm_state)) {
		dev_err(gpii->gpi_dev->dev, "receive interrupt while in %s state\n",
			TO_GPI_PM_STR(gpii->pm_state));
		goto exit_irq;
	}

	offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
	type = gpi_read_reg(gpii, gpii->regs + offset);

	do {
		/* global gpii error */
		if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
			gpi_process_glob_err_irq(gpii);
			type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
		}

		/* transfer complete interrupt */
		if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
			gpi_process_ieob(gpii);
			type &= ~GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
		}

		/* event control irq */
		if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
			u32 ev_state;
			u32 ev_ch_irq;

			dev_dbg(gpii->gpi_dev->dev,
				"processing EV CTRL interrupt\n");
			offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
			ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);

			offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(gpii_id);
			gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
			ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
						CNTXT_0_CONFIG);
			ev_state = FIELD_GET(GPII_n_EV_k_CNTXT_0_CHSTATE, ev_state);

			/*
			 * The EV_CMD_DEALLOC cmd is always successful.
			 * However, the cmd does not change the hardware
			 * status, so overwrite the software state with the
			 * default state.
			 */
			if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
				ev_state = DEFAULT_EV_CH_STATE;

			gpii->ev_state = ev_state;
			dev_dbg(gpii->gpi_dev->dev, "setting EV state to %s\n",
				TO_GPI_EV_STATE_STR(gpii->ev_state));
			complete_all(&gpii->cmd_completion);
			type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
		}

		/* channel control irq */
		if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
			dev_dbg(gpii->gpi_dev->dev, "process CH CTRL interrupts\n");
			gpi_process_ch_ctrl_irq(gpii);
			type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
		}

		if (type) {
			dev_err(gpii->gpi_dev->dev, "Unhandled interrupt status:0x%x\n", type);
			gpi_process_gen_err_irq(gpii);
			goto exit_irq;
		}

		offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
		type = gpi_read_reg(gpii, gpii->regs + offset);
	} while (type);

exit_irq:
	read_unlock_irqrestore(&gpii->pm_lock, flags);

	return IRQ_HANDLED;
}

/* process DMA Immediate completion data events */
static void gpi_process_imed_data_event(struct gchan *gchan,
					struct immediate_data_event *imed_event)
{
	struct gpii *gpii = gchan->gpii;
	struct gpi_ring *ch_ring = &gchan->ch_ring;
	void *tre = ch_ring->base + (ch_ring->el_size * imed_event->tre_index);
	struct dmaengine_result result;
	struct gpi_desc *gpi_desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u32 chid;

	/* don't process the event if the channel is not active */
	if (gchan->pm_state != ACTIVE_STATE) {
		dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
			TO_GPI_PM_STR(gchan->pm_state));
		return;
	}

	spin_lock_irqsave(&gchan->vc.lock, flags);
	vd = vchan_next_desc(&gchan->vc);
	if (!vd) {
		struct gpi_ere *gpi_ere;
		struct gpi_tre *gpi_tre;

		spin_unlock_irqrestore(&gchan->vc.lock, flags);
		dev_dbg(gpii->gpi_dev->dev, "event without a pending descriptor!\n");
		gpi_ere = (struct gpi_ere *)imed_event;
		dev_dbg(gpii->gpi_dev->dev,
			"Event: %08x %08x %08x %08x\n",
			gpi_ere->dword[0], gpi_ere->dword[1],
			gpi_ere->dword[2], gpi_ere->dword[3]);
		gpi_tre = tre;
		dev_dbg(gpii->gpi_dev->dev,
			"Pending TRE: %08x %08x %08x %08x\n",
			gpi_tre->dword[0], gpi_tre->dword[1],
			gpi_tre->dword[2], gpi_tre->dword[3]);
		return;
	}
	gpi_desc = to_gpi_desc(vd);
	spin_unlock_irqrestore(&gchan->vc.lock, flags);

	/*
	 * The RP carried by the event points to the last TRE processed,
	 * so update the ring rp to tre + 1
	 */
	tre += ch_ring->el_size;
	if (tre >= (ch_ring->base + ch_ring->len))
		tre = ch_ring->base;
	ch_ring->rp = tre;

	/* make sure rp updates are immediately visible to all cores */
	smp_wmb();

	chid = imed_event->chid;
	if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
		if (chid == GPI_RX_CHAN)
			goto gpi_free_desc;
		else
			return;
	}

	if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
		result.result = DMA_TRANS_ABORTED;
	else
		result.result = DMA_TRANS_NOERROR;
	result.residue = gpi_desc->len - imed_event->length;

	dma_cookie_complete(&vd->tx);
	dmaengine_desc_get_callback_invoke(&vd->tx, &result);

gpi_free_desc:
	spin_lock_irqsave(&gchan->vc.lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&gchan->vc.lock, flags);
	kfree(gpi_desc);
	gpi_desc = NULL;
}

/* processing transfer completion events */
static void gpi_process_xfer_compl_event(struct gchan *gchan,
					 struct xfer_compl_event *compl_event)
{
	struct gpii *gpii = gchan->gpii;
	struct gpi_ring *ch_ring = &gchan->ch_ring;
	void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
	struct virt_dma_desc *vd;
	struct gpi_desc *gpi_desc;
	struct dmaengine_result result;
	unsigned long flags;
	u32 chid;

	/* only process events on an active channel */
	if (unlikely(gchan->pm_state != ACTIVE_STATE)) {
		dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
			TO_GPI_PM_STR(gchan->pm_state));
		return;
	}

	spin_lock_irqsave(&gchan->vc.lock, flags);
	vd = vchan_next_desc(&gchan->vc);
	if (!vd) {
		struct gpi_ere *gpi_ere;

		spin_unlock_irqrestore(&gchan->vc.lock, flags);
		dev_err(gpii->gpi_dev->dev, "Event without a pending descriptor!\n");
		gpi_ere = (struct gpi_ere *)compl_event;
		dev_err(gpii->gpi_dev->dev,
			"Event: %08x %08x %08x %08x\n",
			gpi_ere->dword[0], gpi_ere->dword[1],
			gpi_ere->dword[2], gpi_ere->dword[3]);
		return;
	}

	gpi_desc = to_gpi_desc(vd);
	spin_unlock_irqrestore(&gchan->vc.lock, flags);

	/*
	 * The RP carried by the event points to the last TRE processed,
	 * so update the ring rp to ev_rp + 1
	 */
	ev_rp += ch_ring->el_size;
	if (ev_rp >= (ch_ring->base + ch_ring->len))
		ev_rp = ch_ring->base;
	ch_ring->rp = ev_rp;

	/* update must be visible to other cores */
	smp_wmb();

	chid = compl_event->chid;
	if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
		if (chid == GPI_RX_CHAN)
			goto gpi_free_desc;
		else
			return;
	}

	if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR) {
		dev_err(gpii->gpi_dev->dev, "Error in Transaction\n");
		result.result = DMA_TRANS_ABORTED;
	} else {
		dev_dbg(gpii->gpi_dev->dev, "Transaction Success\n");
		result.result = DMA_TRANS_NOERROR;
	}
	result.residue = gpi_desc->len - compl_event->length;
	dev_dbg(gpii->gpi_dev->dev, "Residue %d\n", result.residue);

	dma_cookie_complete(&vd->tx);
	dmaengine_desc_get_callback_invoke(&vd->tx, &result);

gpi_free_desc:
	spin_lock_irqsave(&gchan->vc.lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&gchan->vc.lock, flags);
	kfree(gpi_desc);
	gpi_desc = NULL;
}

/* process all events */
static void gpi_process_events(struct gpii *gpii)
{
	struct gpi_ring *ev_ring = &gpii->ev_ring;
	phys_addr_t cntxt_rp;
	void *rp;
	union gpi_event *gpi_event;
	struct gchan *gchan;
	u32 chid, type;

	cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
	rp = to_virtual(ev_ring, cntxt_rp);

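	/*
	 * Drain until the locally cached RP catches up with the hardware RP;
	 * the hardware RP is re-read after each pass so events that arrive
	 * while processing are handled now instead of waiting for the next
	 * interrupt.
	 */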
	do {
		while (rp != ev_ring->rp) {
			gpi_event = ev_ring->rp;
			chid = gpi_event->xfer_compl_event.chid;
			type = gpi_event->xfer_compl_event.type;

			dev_dbg(gpii->gpi_dev->dev,
				"Event: CHID:%u, type:%x %08x %08x %08x %08x\n",
				chid, type, gpi_event->gpi_ere.dword[0],
				gpi_event->gpi_ere.dword[1], gpi_event->gpi_ere.dword[2],
				gpi_event->gpi_ere.dword[3]);

			switch (type) {
			case XFER_COMPLETE_EV_TYPE:
				gchan = &gpii->gchan[chid];
				gpi_process_xfer_compl_event(gchan,
							     &gpi_event->xfer_compl_event);
				break;
			case STALE_EV_TYPE:
				dev_dbg(gpii->gpi_dev->dev, "stale event, not processing\n");
				break;
			case IMMEDIATE_DATA_EV_TYPE:
				gchan = &gpii->gchan[chid];
				gpi_process_imed_data_event(gchan,
							    &gpi_event->immediate_data_event);
				break;
			case QUP_NOTIF_EV_TYPE:
				dev_dbg(gpii->gpi_dev->dev, "QUP_NOTIF_EV_TYPE\n");
				break;
			default:
				dev_dbg(gpii->gpi_dev->dev,
					"not supported event type:0x%x\n", type);
			}
			gpi_ring_recycle_ev_element(ev_ring);
		}
		gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);

		/* clear pending IEOB events */
		gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));

		cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
		rp = to_virtual(ev_ring, cntxt_rp);

	} while (rp != ev_ring->rp);
}

/* processing events using tasklet */
static void gpi_ev_tasklet(unsigned long data)
{
	struct gpii *gpii = (struct gpii *)data;

	read_lock(&gpii->pm_lock);
	if (!REG_ACCESS_VALID(gpii->pm_state)) {
		read_unlock(&gpii->pm_lock);
		dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
			TO_GPI_PM_STR(gpii->pm_state));
		return;
	}

	/* process the events */
	gpi_process_events(gpii);

	/* enable IEOB, switching back to interrupts */
	gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
	read_unlock(&gpii->pm_lock);
}

/* marks all pending events for the channel as stale */
static void gpi_mark_stale_events(struct gchan *gchan)
{
	struct gpii *gpii = gchan->gpii;
	struct gpi_ring *ev_ring = &gpii->ev_ring;
	u32 cntxt_rp, local_rp;
	void *ev_rp;

	cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);

	ev_rp = ev_ring->rp;
	local_rp = (u32)to_physical(ev_ring, ev_rp);
	while (local_rp != cntxt_rp) {
		union gpi_event *gpi_event = ev_rp;
		u32 chid = gpi_event->xfer_compl_event.chid;

		if (chid == gchan->chid)
			gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
		ev_rp += ev_ring->el_size;
		if (ev_rp >= (ev_ring->base + ev_ring->len))
			ev_rp = ev_ring->base;
		cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
		local_rp = (u32)to_physical(ev_ring, ev_rp);
	}
}

/* reset sw state and issue channel reset or de-alloc */
static int gpi_reset_chan(struct gchan *gchan, enum gpi_cmd gpi_cmd)
{
	struct gpii *gpii = gchan->gpii;
	struct gpi_ring *ch_ring = &gchan->ch_ring;
	unsigned long flags;
	LIST_HEAD(list);
	int ret;

	ret = gpi_send_cmd(gpii, gchan, gpi_cmd);
	if (ret) {
		dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
			TO_GPI_CMD_STR(gpi_cmd), ret);
		return ret;
	}

	/* initialize the local ring ptrs */
	ch_ring->rp = ch_ring->base;
	ch_ring->wp = ch_ring->base;

	/* visible to other cores */
	smp_wmb();

	/* check event ring for any stale events */
	write_lock_irq(&gpii->pm_lock);
	gpi_mark_stale_events(gchan);

	/* remove all async descriptors */
	spin_lock_irqsave(&gchan->vc.lock, flags);
	vchan_get_all_descriptors(&gchan->vc, &list);
	spin_unlock_irqrestore(&gchan->vc.lock, flags);
	write_unlock_irq(&gpii->pm_lock);
	vchan_dma_desc_free_list(&gchan->vc, &list);

	return 0;
}

static int gpi_start_chan(struct gchan *gchan)
{
	struct gpii *gpii = gchan->gpii;
	int ret;

	ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_START);
	if (ret) {
		dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
			TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
		return ret;
	}

	/* gpii CH is active now */
	write_lock_irq(&gpii->pm_lock);
	gchan->pm_state = ACTIVE_STATE;
	write_unlock_irq(&gpii->pm_lock);

	return 0;
}

static int gpi_stop_chan(struct gchan *gchan)
{
	struct gpii *gpii = gchan->gpii;
	int ret;

	ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_STOP);
	if (ret) {
		dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
			TO_GPI_CMD_STR(GPI_CH_CMD_STOP), ret);
		return ret;
	}

	return 0;
}

/* allocate and configure the transfer channel */
static int gpi_alloc_chan(struct gchan *chan, bool send_alloc_cmd)
{
	struct gpii *gpii = chan->gpii;
	struct gpi_ring *ring = &chan->ch_ring;
	int ret;
	u32 id = gpii->gpii_id;
	u32 chid = chan->chid;
	u32 pair_chid = !chid;

	if (send_alloc_cmd) {
		ret = gpi_send_cmd(gpii, chan, GPI_CH_CMD_ALLOCATE);
		if (ret) {
			dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
				TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
			return ret;
		}
	}

	gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_0_CONFIG,
		      GPII_n_CH_k_CNTXT_0(ring->el_size, 0, chan->dir, GPI_CHTYPE_PROTO_GPI));
	gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_1_R_LENGTH, ring->len);
	gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_2_RING_BASE_LSB, ring->phys_addr);
	gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_3_RING_BASE_MSB,
		      upper_32_bits(ring->phys_addr));
	gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
		      upper_32_bits(ring->phys_addr));
	gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
		      GPII_n_CH_k_SCRATCH_0(pair_chid, chan->protocol, chan->seid));
	gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0);
	gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0);
	gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0);
	gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_QOS_OFFS(id, chid), 1);

	/* flush all the writes */
	wmb();
	return 0;
}

/* allocate and configure event ring */
static int gpi_alloc_ev_chan(struct gpii *gpii)
{
	struct gpi_ring *ring = &gpii->ev_ring;
	void __iomem *base = gpii->ev_cntxt_base_reg;
	int ret;

	ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
	if (ret) {
		dev_err(gpii->gpi_dev->dev, "error with cmd:%s ret:%d\n",
			TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
		return ret;
	}

	/* program event context */
	gpi_write_reg(gpii, base + CNTXT_0_CONFIG,
		      GPII_n_EV_k_CNTXT_0(ring->el_size, GPI_INTTYPE_IRQ, GPI_CHTYPE_GPI_EV));
	gpi_write_reg(gpii, base + CNTXT_1_R_LENGTH, ring->len);
	gpi_write_reg(gpii, base + CNTXT_2_RING_BASE_LSB, lower_32_bits(ring->phys_addr));
	gpi_write_reg(gpii, base + CNTXT_3_RING_BASE_MSB, upper_32_bits(ring->phys_addr));
	gpi_write_reg(gpii, gpii->ev_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
		      upper_32_bits(ring->phys_addr));
	gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
	gpi_write_reg(gpii, base + CNTXT_10_RING_MSI_LSB, 0);
	gpi_write_reg(gpii, base + CNTXT_11_RING_MSI_MSB, 0);
	gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
	gpi_write_reg(gpii, base + CNTXT_12_RING_RP_UPDATE_LSB, 0);
	gpi_write_reg(gpii, base + CNTXT_13_RING_RP_UPDATE_MSB, 0);

	/* add events to ring */
	ring->wp = (ring->base + ring->len - ring->el_size);

	/* flush all the writes */
	wmb();

	/* gpii is active now */
	write_lock_irq(&gpii->pm_lock);
	gpii->pm_state = ACTIVE_STATE;
	write_unlock_irq(&gpii->pm_lock);
	gpi_write_ev_db(gpii, ring, ring->wp);

	return 0;
}

/* calculate # of ERE/TRE available to queue */
static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
{
	int elements = 0;

	if (ring->wp < ring->rp) {
		elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		elements = (ring->rp - ring->base) / ring->el_size;
		elements += ((ring->base + ring->len - ring->wp) / ring->el_size) - 1;
	}

	return elements;
}
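
/*
 * Illustrative sketch (not part of the driver): for a 256-byte ring of
 * 16-byte elements (16 slots) with base = 0x0, rp = 0x40 and wp = 0x20, the
 * ring has not wrapped (wp < rp), so (0x40 - 0x20) / 0x10 - 1 = 1 element is
 * free. One slot is always kept unused so that a full ring can be told apart
 * from an empty one.
 */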

static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
{
	if (gpi_ring_num_elements_avail(ring) <= 0)
		return -ENOMEM;

	*wp = ring->wp;
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;

	/* visible to other cores */
	smp_wmb();

	return 0;
}

static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
{
	/* Update the WP */
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* visible to other cores */
	smp_wmb();
}

static void gpi_free_ring(struct gpi_ring *ring,
			  struct gpii *gpii)
{
	dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
			  ring->pre_aligned, ring->dma_handle);
	memset(ring, 0, sizeof(*ring));
}

/* allocate memory for transfer and event rings */
static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
			  u32 el_size, struct gpii *gpii)
{
	u64 len = elements * el_size;
	int bit;

	/* ring len must be power of 2 */
	bit = find_last_bit((unsigned long *)&len, 32);
	if (((1 << bit) - 1) & len)
		bit++;
	len = 1 << bit;
	ring->alloc_size = (len + (len - 1));
	dev_dbg(gpii->gpi_dev->dev,
		"#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
		elements, el_size, (elements * el_size), len,
		ring->alloc_size);

	ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
					       ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned) {
		dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
			ring->alloc_size);
		return -ENOMEM;
	}

	/* align the physical mem */
	ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
	ring->rp = ring->base;
	ring->wp = ring->base;
	ring->len = len;
	ring->el_size = el_size;
	ring->elements = ring->len / ring->el_size;
	memset(ring->base, 0, ring->len);
	ring->configured = true;

	/* update to other cores */
	smp_wmb();

	dev_dbg(gpii->gpi_dev->dev,
		"phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
		&ring->dma_handle, &ring->phys_addr, ring->len,
		ring->el_size, ring->elements);

	return 0;
}
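
/*
 * Illustrative sketch (not part of the driver): for 3 elements of 16 bytes
 * (48 bytes), len is rounded up to the next power of two (64) and
 * 64 + 63 = 127 bytes are allocated, which guarantees a naturally aligned
 * 64-byte window exists inside the buffer wherever dma_handle lands:
 *
 *	phys_addr = (dma_handle + 63) & ~63;	// first 64-byte boundary
 *	// phys_addr..phys_addr + 63 always fits inside the 127-byte buffer
 */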

/* copy tre into transfer ring */
static void gpi_queue_xfer(struct gpii *gpii, struct gchan *gchan,
			   struct gpi_tre *gpi_tre, void **wp)
{
	struct gpi_tre *ch_tre;
	int ret;

	/* get next tre location we can copy */
	ret = gpi_ring_add_element(&gchan->ch_ring, (void **)&ch_tre);
	if (unlikely(ret)) {
		dev_err(gpii->gpi_dev->dev, "Error adding ring element to xfer ring\n");
		return;
	}

	/* copy the tre info */
	memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
	*wp = ch_tre;
}
1473 | /* reset and restart transfer channel */ |
1474 | static int gpi_terminate_all(struct dma_chan *chan) |
1475 | { |
1476 | struct gchan *gchan = to_gchan(dma_chan: chan); |
1477 | struct gpii *gpii = gchan->gpii; |
1478 | int schid, echid, i; |
1479 | int ret = 0; |
1480 | |
1481 | mutex_lock(&gpii->ctrl_lock); |
1482 | |
1483 | /* |
1484 | * treat both channels as a group if its protocol is not UART |
1485 | * STOP, RESET, or START needs to be in lockstep |
1486 | */ |
1487 | schid = (gchan->protocol == QCOM_GPI_UART) ? gchan->chid : 0; |
1488 | echid = (gchan->protocol == QCOM_GPI_UART) ? schid + 1 : MAX_CHANNELS_PER_GPII; |
1489 | |
1490 | /* stop the channel */ |
1491 | for (i = schid; i < echid; i++) { |
1492 | gchan = &gpii->gchan[i]; |
1493 | |
1494 | /* disable ch state so no more TRE processing */ |
1495 | write_lock_irq(&gpii->pm_lock); |
1496 | gchan->pm_state = PREPARE_TERMINATE; |
1497 | write_unlock_irq(&gpii->pm_lock); |
1498 | |
1499 | /* send command to Stop the channel */ |
1500 | ret = gpi_stop_chan(gchan); |
1501 | } |
1502 | |
1503 | /* reset the channels (clears any pending tre) */ |
1504 | for (i = schid; i < echid; i++) { |
1505 | gchan = &gpii->gchan[i]; |
1506 | |
1507 | ret = gpi_reset_chan(gchan, gpi_cmd: GPI_CH_CMD_RESET); |
1508 | if (ret) { |
1509 | dev_err(gpii->gpi_dev->dev, "Error resetting channel ret:%d\n" , ret); |
1510 | goto terminate_exit; |
1511 | } |
1512 | |
1513 | /* reprogram channel CNTXT */ |
1514 | ret = gpi_alloc_chan(chan: gchan, send_alloc_cmd: false); |
1515 | if (ret) { |
1516 | dev_err(gpii->gpi_dev->dev, "Error alloc_channel ret:%d\n" , ret); |
1517 | goto terminate_exit; |
1518 | } |
1519 | } |
1520 | |
1521 | /* restart the channels */ |
1522 | for (i = schid; i < echid; i++) { |
1523 | gchan = &gpii->gchan[i]; |
1524 | |
1525 | ret = gpi_start_chan(gchan); |
1526 | if (ret) { |
1527 | dev_err(gpii->gpi_dev->dev, "Error Starting Channel ret:%d\n" , ret); |
1528 | goto terminate_exit; |
1529 | } |
1530 | } |
1531 | |
1532 | terminate_exit: |
1533 | mutex_unlock(lock: &gpii->ctrl_lock); |
1534 | return ret; |
1535 | } |
1536 | |
1537 | /* pause dma transfer for all channels */ |
1538 | static int gpi_pause(struct dma_chan *chan) |
1539 | { |
1540 | struct gchan *gchan = to_gchan(dma_chan: chan); |
1541 | struct gpii *gpii = gchan->gpii; |
1542 | int i, ret; |
1543 | |
1544 | mutex_lock(&gpii->ctrl_lock); |
1545 | |
1546 | /* |
1547 | * pause/resume are per gpii not per channel, so |
1548 | * client needs to call pause only once |
1549 | */ |
1550 | if (gpii->pm_state == PAUSE_STATE) { |
1551 | dev_dbg(gpii->gpi_dev->dev, "channel is already paused\n" ); |
1552 | mutex_unlock(lock: &gpii->ctrl_lock); |
1553 | return 0; |
1554 | } |
1555 | |
1556 | /* send stop command to stop the channels */ |
1557 | for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) { |
1558 | ret = gpi_stop_chan(gchan: &gpii->gchan[i]); |
1559 | if (ret) { |
1560 | mutex_unlock(lock: &gpii->ctrl_lock); |
1561 | return ret; |
1562 | } |
1563 | } |
1564 | |
1565 | disable_irq(irq: gpii->irq); |
1566 | |
1567 | /* Wait for threads to complete out */ |
1568 | tasklet_kill(t: &gpii->ev_task); |
1569 | |
1570 | write_lock_irq(&gpii->pm_lock); |
1571 | gpii->pm_state = PAUSE_STATE; |
1572 | write_unlock_irq(&gpii->pm_lock); |
1573 | mutex_unlock(lock: &gpii->ctrl_lock); |
1574 | |
1575 | return 0; |
1576 | } |
1577 | |
1578 | /* resume dma transfer */ |
1579 | static int gpi_resume(struct dma_chan *chan) |
1580 | { |
1581 | struct gchan *gchan = to_gchan(dma_chan: chan); |
1582 | struct gpii *gpii = gchan->gpii; |
1583 | int i, ret; |
1584 | |
1585 | mutex_lock(&gpii->ctrl_lock); |
1586 | if (gpii->pm_state == ACTIVE_STATE) { |
		dev_dbg(gpii->gpi_dev->dev, "channel is already active\n");
		mutex_unlock(&gpii->ctrl_lock);
1589 | return 0; |
1590 | } |
1591 | |
	enable_irq(gpii->irq);
1593 | |
1594 | /* send start command to start the channels */ |
1595 | for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) { |
		ret = gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_START);
		if (ret) {
			dev_err(gpii->gpi_dev->dev, "Error starting chan, ret:%d\n", ret);
			mutex_unlock(&gpii->ctrl_lock);
1600 | return ret; |
1601 | } |
1602 | } |
1603 | |
1604 | write_lock_irq(&gpii->pm_lock); |
1605 | gpii->pm_state = ACTIVE_STATE; |
1606 | write_unlock_irq(&gpii->pm_lock); |
	mutex_unlock(&gpii->ctrl_lock);
1608 | |
1609 | return 0; |
1610 | } |
1611 | |
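/* free the virt-dma descriptor; installed as vc.desc_free in gpi_probe() */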
1612 | static void gpi_desc_free(struct virt_dma_desc *vd) |
1613 | { |
1614 | struct gpi_desc *gpi_desc = to_gpi_desc(vd); |
1615 | |
	kfree(gpi_desc);
1618 | } |
1619 | |
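/* cache the client's peripheral (SPI/I2C) config passed via dma_slave_config */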
1620 | static int |
1621 | gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config) |
1622 | { |
	struct gchan *gchan = to_gchan(chan);
1624 | |
1625 | if (!config->peripheral_config) |
1626 | return -EINVAL; |
1627 | |
	gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
1629 | if (!gchan->config) |
1630 | return -ENOMEM; |
1631 | |
1632 | memcpy(gchan->config, config->peripheral_config, config->peripheral_size); |
1633 | |
1634 | return 0; |
1635 | } |
1636 | |
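/*
 * Build the I2C TRE chain for one transfer: an optional CONFIG0 TRE
 * (clock/timing), a GO TRE for writes, and a DMA TRE carrying the buffer
 * address; returns the number of TREs written.
 */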
1637 | static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc, |
1638 | struct scatterlist *sgl, enum dma_transfer_direction direction) |
1639 | { |
1640 | struct gpi_i2c_config *i2c = chan->config; |
1641 | struct device *dev = chan->gpii->gpi_dev->dev; |
1642 | unsigned int tre_idx = 0; |
1643 | dma_addr_t address; |
1644 | struct gpi_tre *tre; |
1645 | unsigned int i; |
1646 | |
1647 | /* first create config tre if applicable */ |
1648 | if (i2c->set_config) { |
1649 | tre = &desc->tre[tre_idx]; |
1650 | tre_idx++; |
1651 | |
		tre->dword[0] = u32_encode_bits(i2c->low_count, TRE_I2C_C0_TLOW);
		tre->dword[0] |= u32_encode_bits(i2c->high_count, TRE_I2C_C0_THIGH);
		tre->dword[0] |= u32_encode_bits(i2c->cycle_count, TRE_I2C_C0_TCYL);
		tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_TX_PACK);
		tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_RX_PACK);
1657 | |
1658 | tre->dword[1] = 0; |
1659 | |
		tre->dword[2] = u32_encode_bits(i2c->clk_div, TRE_C0_CLK_DIV);

		tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
1664 | } |
1665 | |
1666 | /* create the GO tre for Tx */ |
1667 | if (i2c->op == I2C_WRITE) { |
1668 | tre = &desc->tre[tre_idx]; |
1669 | tre_idx++; |
1670 | |
1671 | if (i2c->multi_msg) |
			tre->dword[0] = u32_encode_bits(I2C_READ, TRE_I2C_GO_CMD);
		else
			tre->dword[0] = u32_encode_bits(i2c->op, TRE_I2C_GO_CMD);

		tre->dword[0] |= u32_encode_bits(i2c->addr, TRE_I2C_GO_ADDR);
		tre->dword[0] |= u32_encode_bits(i2c->stretch, TRE_I2C_GO_STRETCH);

		tre->dword[1] = 0;
		tre->dword[2] = u32_encode_bits(i2c->rx_len, TRE_RX_LEN);

		tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);

		if (i2c->multi_msg)
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
		else
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
1688 | } |
1689 | |
	if (i2c->op == I2C_READ || !i2c->multi_msg) {
1691 | /* create the DMA TRE */ |
1692 | tre = &desc->tre[tre_idx]; |
1693 | tre_idx++; |
1694 | |
1695 | address = sg_dma_address(sgl); |
1696 | tre->dword[0] = lower_32_bits(address); |
1697 | tre->dword[1] = upper_32_bits(address); |
1698 | |
1699 | tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN); |
1700 | |
1701 | tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE); |
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
1703 | } |
1704 | |
1705 | for (i = 0; i < tre_idx; i++) |
1706 | dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n" , i, desc->tre[i].dword[0], |
1707 | desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]); |
1708 | |
1709 | return tre_idx; |
1710 | } |
1711 | |
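/*
 * Build the SPI TRE chain for one transfer: for TX, an optional CONFIG0
 * TRE and a GO TRE precede the DMA TRE; RX uses the DMA TRE alone.
 * Returns the number of TREs written.
 */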
1712 | static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc, |
1713 | struct scatterlist *sgl, enum dma_transfer_direction direction) |
1714 | { |
1715 | struct gpi_spi_config *spi = chan->config; |
1716 | struct device *dev = chan->gpii->gpi_dev->dev; |
1717 | unsigned int tre_idx = 0; |
1718 | dma_addr_t address; |
1719 | struct gpi_tre *tre; |
1720 | unsigned int i; |
1721 | |
1722 | /* first create config tre if applicable */ |
1723 | if (direction == DMA_MEM_TO_DEV && spi->set_config) { |
1724 | tre = &desc->tre[tre_idx]; |
1725 | tre_idx++; |
1726 | |
		tre->dword[0] = u32_encode_bits(spi->word_len, TRE_SPI_C0_WORD_SZ);
		tre->dword[0] |= u32_encode_bits(spi->loopback_en, TRE_SPI_C0_LOOPBACK);
		tre->dword[0] |= u32_encode_bits(spi->clock_pol_high, TRE_SPI_C0_CPOL);
		tre->dword[0] |= u32_encode_bits(spi->data_pol_high, TRE_SPI_C0_CPHA);
		tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_TX_PACK);
		tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_RX_PACK);
1733 | |
1734 | tre->dword[1] = 0; |
1735 | |
		tre->dword[2] = u32_encode_bits(spi->clk_div, TRE_C0_CLK_DIV);
		tre->dword[2] |= u32_encode_bits(spi->clk_src, TRE_C0_CLK_SRC);
1738 | |
1739 | tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE); |
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
1741 | } |
1742 | |
1743 | /* create the GO tre for Tx */ |
1744 | if (direction == DMA_MEM_TO_DEV) { |
1745 | tre = &desc->tre[tre_idx]; |
1746 | tre_idx++; |
1747 | |
		tre->dword[0] = u32_encode_bits(spi->fragmentation, TRE_SPI_GO_FRAG);
		tre->dword[0] |= u32_encode_bits(spi->cs, TRE_SPI_GO_CS);
		tre->dword[0] |= u32_encode_bits(spi->cmd, TRE_SPI_GO_CMD);
1751 | |
1752 | tre->dword[1] = 0; |
1753 | |
		tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
1755 | |
1756 | tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE); |
1757 | if (spi->cmd == SPI_RX) { |
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
		} else if (spi->cmd == SPI_TX) {
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
		} else { /* SPI_DUPLEX */
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
1765 | } |
1766 | } |
1767 | |
1768 | /* create the dma tre */ |
1769 | tre = &desc->tre[tre_idx]; |
1770 | tre_idx++; |
1771 | |
1772 | address = sg_dma_address(sgl); |
1773 | tre->dword[0] = lower_32_bits(address); |
1774 | tre->dword[1] = upper_32_bits(address); |
1775 | |
1776 | tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN); |
1777 | |
1778 | tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE); |
1779 | if (direction == DMA_MEM_TO_DEV) |
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
1781 | |
1782 | for (i = 0; i < tre_idx; i++) |
1783 | dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n" , i, desc->tre[i].dword[0], |
1784 | desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]); |
1785 | |
1786 | return tre_idx; |
1787 | } |
1788 | |
1789 | /* copy tre into transfer ring */ |
1790 | static struct dma_async_tx_descriptor * |
1791 | gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
1792 | unsigned int sg_len, enum dma_transfer_direction direction, |
1793 | unsigned long flags, void *context) |
1794 | { |
	struct gchan *gchan = to_gchan(chan);
1796 | struct gpii *gpii = gchan->gpii; |
1797 | struct device *dev = gpii->gpi_dev->dev; |
1798 | struct gpi_ring *ch_ring = &gchan->ch_ring; |
1799 | struct gpi_desc *gpi_desc; |
1800 | u32 nr, nr_tre = 0; |
1801 | u8 set_config; |
1802 | int i; |
1803 | |
1804 | gpii->ieob_set = false; |
1805 | if (!is_slave_direction(direction)) { |
1806 | dev_err(gpii->gpi_dev->dev, "invalid dma direction: %d\n" , direction); |
1807 | return NULL; |
1808 | } |
1809 | |
1810 | if (sg_len > 1) { |
1811 | dev_err(dev, "Multi sg sent, we support only one atm: %d\n" , sg_len); |
1812 | return NULL; |
1813 | } |
1814 | |
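	/*
	 * worst case per transfer: CONFIG0 + GO + DMA TREs for tx with a new
	 * config, GO + DMA without one, and a single DMA TRE for rx
	 */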
1815 | nr_tre = 3; |
1816 | set_config = *(u32 *)gchan->config; |
1817 | if (!set_config) |
1818 | nr_tre = 2; |
1819 | if (direction == DMA_DEV_TO_MEM) /* rx */ |
1820 | nr_tre = 1; |
1821 | |
1822 | /* calculate # of elements required & available */ |
	nr = gpi_ring_num_elements_avail(ch_ring);
1824 | if (nr < nr_tre) { |
1825 | dev_err(dev, "not enough space in ring, avail:%u required:%u\n" , nr, nr_tre); |
1826 | return NULL; |
1827 | } |
1828 | |
	gpi_desc = kzalloc(sizeof(*gpi_desc), GFP_NOWAIT);
1830 | if (!gpi_desc) |
1831 | return NULL; |
1832 | |
1833 | /* create TREs for xfer */ |
	if (gchan->protocol == QCOM_GPI_SPI) {
		i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
	} else if (gchan->protocol == QCOM_GPI_I2C) {
		i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction);
	} else {
		dev_err(dev, "invalid peripheral: %d\n", gchan->protocol);
		kfree(gpi_desc);
		return NULL;
	}
1843 | |
1844 | /* set up the descriptor */ |
1845 | gpi_desc->gchan = gchan; |
1846 | gpi_desc->len = sg_dma_len(sgl); |
1847 | gpi_desc->num_tre = i; |
1848 | |
	return vchan_tx_prep(&gchan->vc, &gpi_desc->vd, flags);
1850 | } |
1851 | |
/* ring the transfer ring doorbell to begin the transfer */
1853 | static void gpi_issue_pending(struct dma_chan *chan) |
1854 | { |
	struct gchan *gchan = to_gchan(chan);
1856 | struct gpii *gpii = gchan->gpii; |
1857 | unsigned long flags, pm_lock_flags; |
1858 | struct virt_dma_desc *vd = NULL; |
1859 | struct gpi_desc *gpi_desc; |
1860 | struct gpi_ring *ch_ring = &gchan->ch_ring; |
1861 | void *tre, *wp = NULL; |
1862 | int i; |
1863 | |
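	/* hold pm_lock as reader so pause/terminate cannot flip pm_state mid-queue */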
1864 | read_lock_irqsave(&gpii->pm_lock, pm_lock_flags); |
1865 | |
	/* move all submitted descriptors to the issued list */
1867 | spin_lock_irqsave(&gchan->vc.lock, flags); |
	if (vchan_issue_pending(&gchan->vc))
1869 | vd = list_last_entry(&gchan->vc.desc_issued, |
1870 | struct virt_dma_desc, node); |
	spin_unlock_irqrestore(&gchan->vc.lock, flags);
1872 | |
	/* nothing to do, list is empty */
1874 | if (!vd) { |
1875 | read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags); |
1876 | return; |
1877 | } |
1878 | |
1879 | gpi_desc = to_gpi_desc(vd); |
1880 | for (i = 0; i < gpi_desc->num_tre; i++) { |
1881 | tre = &gpi_desc->tre[i]; |
		gpi_queue_xfer(gpii, gchan, tre, &wp);
1883 | } |
1884 | |
1885 | gpi_desc->db = ch_ring->wp; |
	gpi_write_ch_db(gchan, &gchan->ch_ring, gpi_desc->db);
1887 | read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags); |
1888 | } |
1889 | |
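/*
 * Bring up the whole GPII once both channels are configured: allocate the
 * event ring, enable interrupts, then allocate and start both channels.
 */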
1890 | static int gpi_ch_init(struct gchan *gchan) |
1891 | { |
1892 | struct gpii *gpii = gchan->gpii; |
1893 | const int ev_factor = gpii->gpi_dev->ev_factor; |
1894 | u32 elements; |
1895 | int i = 0, ret = 0; |
1896 | |
1897 | gchan->pm_state = CONFIG_STATE; |
1898 | |
	/* check if both channels are configured before continuing */
1900 | for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) |
1901 | if (gpii->gchan[i].pm_state != CONFIG_STATE) |
1902 | goto exit_gpi_init; |
1903 | |
1904 | /* protocol must be same for both channels */ |
1905 | if (gpii->gchan[0].protocol != gpii->gchan[1].protocol) { |
1906 | dev_err(gpii->gpi_dev->dev, "protocol did not match protocol %u != %u\n" , |
1907 | gpii->gchan[0].protocol, gpii->gchan[1].protocol); |
1908 | ret = -EINVAL; |
1909 | goto exit_gpi_init; |
1910 | } |
1911 | |
1912 | /* allocate memory for event ring */ |
1913 | elements = CHAN_TRES << ev_factor; |
	ret = gpi_alloc_ring(&gpii->ev_ring, elements,
			     sizeof(union gpi_event), gpii);
1916 | if (ret) |
1917 | goto exit_gpi_init; |
1918 | |
1919 | /* configure interrupts */ |
1920 | write_lock_irq(&gpii->pm_lock); |
1921 | gpii->pm_state = PREPARE_HARDWARE; |
1922 | write_unlock_irq(&gpii->pm_lock); |
	ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
	if (ret) {
		dev_err(gpii->gpi_dev->dev, "error configuring interrupts, ret:%d\n", ret);
1926 | goto error_config_int; |
1927 | } |
1928 | |
	/* allocate the event channel */
	ret = gpi_alloc_ev_chan(gpii);
	if (ret) {
		dev_err(gpii->gpi_dev->dev, "error alloc_ev_chan:%d\n", ret);
1933 | goto error_alloc_ev_ring; |
1934 | } |
1935 | |
1936 | /* Allocate all channels */ |
1937 | for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) { |
		ret = gpi_alloc_chan(&gpii->gchan[i], true);
		if (ret) {
			dev_err(gpii->gpi_dev->dev, "Error allocating chan:%d\n", ret);
1941 | goto error_alloc_chan; |
1942 | } |
1943 | } |
1944 | |
1945 | /* start channels */ |
1946 | for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) { |
		ret = gpi_start_chan(&gpii->gchan[i]);
		if (ret) {
			dev_err(gpii->gpi_dev->dev, "Error starting chan:%d\n", ret);
1950 | goto error_start_chan; |
1951 | } |
1952 | } |
1953 | return ret; |
1954 | |
error_start_chan:
	for (i = i - 1; i >= 0; i--) {
		gpi_stop_chan(&gpii->gchan[i]);
		gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_RESET);
	}
	i = 2;
error_alloc_chan:
	for (i = i - 1; i >= 0; i--)
		gpi_reset_chan(&gpii->gchan[i], GPI_CH_CMD_DE_ALLOC);
1964 | error_alloc_ev_ring: |
1965 | gpi_disable_interrupts(gpii); |
1966 | error_config_int: |
	gpi_free_ring(&gpii->ev_ring, gpii);
1968 | exit_gpi_init: |
1969 | return ret; |
1970 | } |
1971 | |
1972 | /* release all channel resources */ |
1973 | static void gpi_free_chan_resources(struct dma_chan *chan) |
1974 | { |
	struct gchan *gchan = to_gchan(chan);
1976 | struct gpii *gpii = gchan->gpii; |
1977 | enum gpi_pm_state cur_state; |
1978 | int ret, i; |
1979 | |
1980 | mutex_lock(&gpii->ctrl_lock); |
1981 | |
1982 | cur_state = gchan->pm_state; |
1983 | |
1984 | /* disable ch state so no more TRE processing for this channel */ |
1985 | write_lock_irq(&gpii->pm_lock); |
1986 | gchan->pm_state = PREPARE_TERMINATE; |
1987 | write_unlock_irq(&gpii->pm_lock); |
1988 | |
1989 | /* attempt to do graceful hardware shutdown */ |
1990 | if (cur_state == ACTIVE_STATE) { |
1991 | gpi_stop_chan(gchan); |
1992 | |
		ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
		if (ret)
			dev_err(gpii->gpi_dev->dev, "error resetting channel:%d\n", ret);
1996 | |
		gpi_reset_chan(gchan, GPI_CH_CMD_DE_ALLOC);
1998 | } |
1999 | |
2000 | /* free all allocated memory */ |
	gpi_free_ring(&gchan->ch_ring, gpii);
	vchan_free_chan_resources(&gchan->vc);
	kfree(gchan->config);
2004 | |
2005 | write_lock_irq(&gpii->pm_lock); |
2006 | gchan->pm_state = DISABLE_STATE; |
2007 | write_unlock_irq(&gpii->pm_lock); |
2008 | |
2009 | /* if other rings are still active exit */ |
2010 | for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) |
2011 | if (gpii->gchan[i].ch_ring.configured) |
2012 | goto exit_free; |
2013 | |
2014 | /* deallocate EV Ring */ |
2015 | cur_state = gpii->pm_state; |
2016 | write_lock_irq(&gpii->pm_lock); |
2017 | gpii->pm_state = PREPARE_TERMINATE; |
2018 | write_unlock_irq(&gpii->pm_lock); |
2019 | |
	/* wait for the event tasklet to finish */
	tasklet_kill(&gpii->ev_task);

	/* send command to de-allocate event ring */
	if (cur_state == ACTIVE_STATE)
		gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2026 | |
	gpi_free_ring(&gpii->ev_ring, gpii);
2028 | |
2029 | /* disable interrupts */ |
2030 | if (cur_state == ACTIVE_STATE) |
2031 | gpi_disable_interrupts(gpii); |
2032 | |
2033 | /* set final state to disable */ |
2034 | write_lock_irq(&gpii->pm_lock); |
2035 | gpii->pm_state = DISABLE_STATE; |
2036 | write_unlock_irq(&gpii->pm_lock); |
2037 | |
2038 | exit_free: |
	mutex_unlock(&gpii->ctrl_lock);
2040 | } |
2041 | |
2042 | /* allocate channel resources */ |
2043 | static int gpi_alloc_chan_resources(struct dma_chan *chan) |
2044 | { |
	struct gchan *gchan = to_gchan(chan);
2046 | struct gpii *gpii = gchan->gpii; |
2047 | int ret; |
2048 | |
2049 | mutex_lock(&gpii->ctrl_lock); |
2050 | |
2051 | /* allocate memory for transfer ring */ |
	ret = gpi_alloc_ring(&gchan->ch_ring, CHAN_TRES,
			     sizeof(struct gpi_tre), gpii);
2054 | if (ret) |
2055 | goto xfer_alloc_err; |
2056 | |
2057 | ret = gpi_ch_init(gchan); |
2058 | |
	mutex_unlock(&gpii->ctrl_lock);
2060 | |
2061 | return ret; |
2062 | xfer_alloc_err: |
	mutex_unlock(&gpii->ctrl_lock);
2064 | |
2065 | return ret; |
2066 | } |
2067 | |
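/*
 * Pick a GPII for the requested serial engine: prefer one whose other
 * channel is already bound to the same seid, else the first unused one.
 */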
2068 | static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid) |
2069 | { |
2070 | struct gchan *tx_chan, *rx_chan; |
2071 | unsigned int gpii; |
2072 | |
2073 | /* check if same seid is already configured for another chid */ |
2074 | for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) { |
2075 | if (!((1 << gpii) & gpi_dev->gpii_mask)) |
2076 | continue; |
2077 | |
2078 | tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN]; |
2079 | rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN]; |
2080 | |
2081 | if (rx_chan->vc.chan.client_count && rx_chan->seid == seid) |
2082 | return gpii; |
2083 | if (tx_chan->vc.chan.client_count && tx_chan->seid == seid) |
2084 | return gpii; |
2085 | } |
2086 | |
2087 | /* no channels configured with same seid, return next avail gpii */ |
2088 | for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) { |
2089 | if (!((1 << gpii) & gpi_dev->gpii_mask)) |
2090 | continue; |
2091 | |
2092 | tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN]; |
2093 | rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN]; |
2094 | |
2095 | /* check if gpii is configured */ |
2096 | if (tx_chan->vc.chan.client_count || |
2097 | rx_chan->vc.chan.client_count) |
2098 | continue; |
2099 | |
2100 | /* found a free gpii */ |
2101 | return gpii; |
2102 | } |
2103 | |
2104 | /* no gpii instance available to use */ |
2105 | return -EIO; |
2106 | } |
2107 | |
2108 | /* gpi_of_dma_xlate: open client requested channel */ |
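/*
 * The dma-cell layout parsed here is <chid seid protocol>. A hypothetical
 * client node requesting both channels of serial engine 1 for SPI (names
 * and indices illustrative only) could look like:
 *
 *   dmas = <&gpi_dma0 0 1 QCOM_GPI_SPI>, <&gpi_dma0 1 1 QCOM_GPI_SPI>;
 *   dma-names = "tx", "rx";
 */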
2109 | static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args, |
2110 | struct of_dma *of_dma) |
2111 | { |
2112 | struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data; |
2113 | u32 seid, chid; |
2114 | int gpii; |
2115 | struct gchan *gchan; |
2116 | |
2117 | if (args->args_count < 3) { |
2118 | dev_err(gpi_dev->dev, "gpii require minimum 2 args, client passed:%d args\n" , |
2119 | args->args_count); |
2120 | return NULL; |
2121 | } |
2122 | |
2123 | chid = args->args[0]; |
2124 | if (chid >= MAX_CHANNELS_PER_GPII) { |
2125 | dev_err(gpi_dev->dev, "gpii channel:%d not valid\n" , chid); |
2126 | return NULL; |
2127 | } |
2128 | |
2129 | seid = args->args[1]; |
2130 | |
2131 | /* find next available gpii to use */ |
2132 | gpii = gpi_find_avail_gpii(gpi_dev, seid); |
2133 | if (gpii < 0) { |
2134 | dev_err(gpi_dev->dev, "no available gpii instances\n" ); |
2135 | return NULL; |
2136 | } |
2137 | |
2138 | gchan = &gpi_dev->gpiis[gpii].gchan[chid]; |
2139 | if (gchan->vc.chan.client_count) { |
2140 | dev_err(gpi_dev->dev, "gpii:%d chid:%d seid:%d already configured\n" , |
2141 | gpii, chid, gchan->seid); |
2142 | return NULL; |
2143 | } |
2144 | |
2145 | gchan->seid = seid; |
2146 | gchan->protocol = args->args[2]; |
2147 | |
	return dma_get_slave_channel(&gchan->vc.chan);
2149 | } |
2150 | |
2151 | static int gpi_probe(struct platform_device *pdev) |
2152 | { |
2153 | struct gpi_dev *gpi_dev; |
2154 | unsigned int i; |
2155 | u32 ee_offset; |
2156 | int ret; |
2157 | |
	gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2159 | if (!gpi_dev) |
2160 | return -ENOMEM; |
2161 | |
2162 | gpi_dev->dev = &pdev->dev; |
	gpi_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &gpi_dev->res);
	if (IS_ERR(gpi_dev->regs))
		return PTR_ERR(gpi_dev->regs);
2166 | gpi_dev->ee_base = gpi_dev->regs; |
2167 | |
	ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channels",
				   &gpi_dev->max_gpii);
	if (ret) {
		dev_err(gpi_dev->dev, "missing 'dma-channels' DT property\n");
2172 | return ret; |
2173 | } |
2174 | |
	ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channel-mask",
				   &gpi_dev->gpii_mask);
	if (ret) {
		dev_err(gpi_dev->dev, "missing 'dma-channel-mask' DT property\n");
2179 | return ret; |
2180 | } |
2181 | |
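	/* match data is the per-SoC EE register offset; rebase ee_base by it */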
	ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev);
2183 | gpi_dev->ee_base = gpi_dev->ee_base - ee_offset; |
2184 | |
2185 | gpi_dev->ev_factor = EV_FACTOR; |
2186 | |
	ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(gpi_dev->dev, "Error setting dma_mask to 64, ret:%d\n", ret);
2190 | return ret; |
2191 | } |
2192 | |
	gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev, sizeof(*gpi_dev->gpiis) *
				      gpi_dev->max_gpii, GFP_KERNEL);
2195 | if (!gpi_dev->gpiis) |
2196 | return -ENOMEM; |
2197 | |
2198 | /* setup all the supported gpii */ |
	INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2200 | for (i = 0; i < gpi_dev->max_gpii; i++) { |
2201 | struct gpii *gpii = &gpi_dev->gpiis[i]; |
2202 | int chan; |
2203 | |
2204 | if (!((1 << i) & gpi_dev->gpii_mask)) |
2205 | continue; |
2206 | |
2207 | /* set up ev cntxt register map */ |
2208 | gpii->ev_cntxt_base_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0); |
2209 | gpii->ev_cntxt_db_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0); |
2210 | gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg + CNTXT_4_RING_RP_LSB; |
2211 | gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i); |
2212 | gpii->ieob_clr_reg = gpi_dev->ee_base + GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i); |
2213 | |
2214 | /* set up irq */ |
2215 | ret = platform_get_irq(pdev, i); |
2216 | if (ret < 0) |
2217 | return ret; |
2218 | gpii->irq = ret; |
2219 | |
2220 | /* set up channel specific register info */ |
2221 | for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) { |
2222 | struct gchan *gchan = &gpii->gchan[chan]; |
2223 | |
2224 | /* set up ch cntxt register map */ |
2225 | gchan->ch_cntxt_base_reg = gpi_dev->ee_base + |
2226 | GPII_n_CH_k_CNTXT_0_OFFS(i, chan); |
2227 | gchan->ch_cntxt_db_reg = gpi_dev->ee_base + |
2228 | GPII_n_CH_k_DOORBELL_0_OFFS(i, chan); |
2229 | gchan->ch_cmd_reg = gpi_dev->ee_base + GPII_n_CH_CMD_OFFS(i); |
2230 | |
2231 | /* vchan setup */ |
			vchan_init(&gchan->vc, &gpi_dev->dma_device);
2233 | gchan->vc.desc_free = gpi_desc_free; |
2234 | gchan->chid = chan; |
2235 | gchan->gpii = gpii; |
2236 | gchan->dir = GPII_CHAN_DIR[chan]; |
2237 | } |
2238 | mutex_init(&gpii->ctrl_lock); |
2239 | rwlock_init(&gpii->pm_lock); |
		tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
			     (unsigned long)gpii);
		init_completion(&gpii->cmd_completion);
2243 | gpii->gpii_id = i; |
2244 | gpii->regs = gpi_dev->ee_base; |
2245 | gpii->gpi_dev = gpi_dev; |
2246 | } |
2247 | |
	platform_set_drvdata(pdev, gpi_dev);
2249 | |
	/* clear and set capabilities */
2251 | dma_cap_zero(gpi_dev->dma_device.cap_mask); |
2252 | dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask); |
2253 | |
2254 | /* configure dmaengine apis */ |
2255 | gpi_dev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
2256 | gpi_dev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; |
2257 | gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES; |
2258 | gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES; |
2259 | gpi_dev->dma_device.device_alloc_chan_resources = gpi_alloc_chan_resources; |
2260 | gpi_dev->dma_device.device_free_chan_resources = gpi_free_chan_resources; |
2261 | gpi_dev->dma_device.device_tx_status = dma_cookie_status; |
2262 | gpi_dev->dma_device.device_issue_pending = gpi_issue_pending; |
2263 | gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg; |
2264 | gpi_dev->dma_device.device_config = gpi_peripheral_config; |
2265 | gpi_dev->dma_device.device_terminate_all = gpi_terminate_all; |
2266 | gpi_dev->dma_device.dev = gpi_dev->dev; |
2267 | gpi_dev->dma_device.device_pause = gpi_pause; |
2268 | gpi_dev->dma_device.device_resume = gpi_resume; |
2269 | |
2270 | /* register with dmaengine framework */ |
	ret = dma_async_device_register(&gpi_dev->dma_device);
	if (ret) {
		dev_err(gpi_dev->dev, "async_device_register failed ret:%d", ret);
2274 | return ret; |
2275 | } |
2276 | |
	ret = of_dma_controller_register(gpi_dev->dev->of_node,
					 gpi_of_dma_xlate, gpi_dev);
	if (ret) {
		dev_err(gpi_dev->dev, "of_dma_controller_reg failed ret:%d", ret);
2281 | return ret; |
2282 | } |
2283 | |
2284 | return ret; |
2285 | } |
2286 | |
2287 | static const struct of_device_id gpi_of_match[] = { |
2288 | { .compatible = "qcom,sdm845-gpi-dma" , .data = (void *)0x0 }, |
2289 | { .compatible = "qcom,sm6350-gpi-dma" , .data = (void *)0x10000 }, |
2290 | /* |
2291 | * Do not grow the list for compatible devices. Instead use |
2292 | * qcom,sdm845-gpi-dma (for ee_offset = 0x0) or qcom,sm6350-gpi-dma |
2293 | * (for ee_offset = 0x10000). |
2294 | */ |
2295 | { .compatible = "qcom,sc7280-gpi-dma" , .data = (void *)0x10000 }, |
2296 | { .compatible = "qcom,sm8150-gpi-dma" , .data = (void *)0x0 }, |
2297 | { .compatible = "qcom,sm8250-gpi-dma" , .data = (void *)0x0 }, |
2298 | { .compatible = "qcom,sm8350-gpi-dma" , .data = (void *)0x10000 }, |
2299 | { .compatible = "qcom,sm8450-gpi-dma" , .data = (void *)0x10000 }, |
2300 | { }, |
2301 | }; |
2302 | MODULE_DEVICE_TABLE(of, gpi_of_match); |
2303 | |
2304 | static struct platform_driver gpi_driver = { |
2305 | .probe = gpi_probe, |
2306 | .driver = { |
2307 | .name = KBUILD_MODNAME, |
2308 | .of_match_table = gpi_of_match, |
2309 | }, |
2310 | }; |
2311 | |
2312 | static int __init gpi_init(void) |
2313 | { |
2314 | return platform_driver_register(&gpi_driver); |
2315 | } |
subsys_initcall(gpi_init);
2317 | |
MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
MODULE_LICENSE("GPL v2");
2320 | |