1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) Ericsson AB 2007-2008 |
4 | * Copyright (C) ST-Ericsson SA 2008-2010 |
5 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson |
6 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson |
7 | */ |
8 | |
9 | #include <linux/dma-mapping.h> |
10 | #include <linux/kernel.h> |
11 | #include <linux/slab.h> |
12 | #include <linux/export.h> |
13 | #include <linux/dmaengine.h> |
14 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> |
16 | #include <linux/delay.h> |
17 | #include <linux/log2.h> |
18 | #include <linux/pm.h> |
19 | #include <linux/pm_runtime.h> |
20 | #include <linux/err.h> |
21 | #include <linux/of.h> |
22 | #include <linux/of_address.h> |
23 | #include <linux/of_dma.h> |
24 | #include <linux/amba/bus.h> |
25 | #include <linux/regulator/consumer.h> |
26 | |
27 | #include "dmaengine.h" |
28 | #include "ste_dma40.h" |
29 | #include "ste_dma40_ll.h" |
30 | |
31 | /** |
32 | * struct stedma40_platform_data - Configuration struct for the dma device. |
33 | * |
 * @disabled_channels: A vector, ending with -1, that marks physical channels
 * that are for different reasons not available for the driver.
 * @soft_lli_chans: A vector that marks the physical channels that will use
 * LLI by SW, which avoids a HW bug that exists in some versions of the
 * controller. SoftLLI introduces relink overhead that could impact
 * performance for certain use cases.
 * @num_of_soft_lli_chans: The number of channels that need to be configured
 * to use SoftLLI.
44 | * @use_esram_lcla: flag for mapping the lcla into esram region |
45 | * @num_of_memcpy_chans: The number of channels reserved for memcpy. |
46 | * @num_of_phy_chans: The number of physical channels implemented in HW. |
 * 0 means the number of channels is read from the DMA HW, but this is only
 * valid for a 'multiple of 4' number of channels, like 8.
49 | */ |
50 | struct stedma40_platform_data { |
51 | int disabled_channels[STEDMA40_MAX_PHYS]; |
52 | int *soft_lli_chans; |
53 | int num_of_soft_lli_chans; |
54 | bool use_esram_lcla; |
55 | int num_of_memcpy_chans; |
56 | int num_of_phy_chans; |
57 | }; |
58 | |
59 | #define D40_NAME "dma40" |
60 | |
61 | #define D40_PHY_CHAN -1 |
62 | |
63 | /* For masking out/in 2 bit channel positions */ |
64 | #define D40_CHAN_POS(chan) (2 * (chan / 2)) |
65 | #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) |
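
/*
 * D40_CHAN_POS() maps channels 2n and 2n+1 to the same 2-bit position:
 * the even channel uses one register of a pair (e.g. D40_DREG_ACTIVE)
 * and the odd one its sibling (D40_DREG_ACTIVO), so e.g. channels 4 and
 * 5 both occupy bits [5:4] of their respective register.
 */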
66 | |
67 | /* Maximum iterations taken before giving up suspending a channel */ |
68 | #define D40_SUSPEND_MAX_IT 500 |
69 | |
70 | /* Milliseconds */ |
71 | #define DMA40_AUTOSUSPEND_DELAY 100 |
72 | |
73 | /* Hardware requirement on LCLA alignment */ |
74 | #define LCLA_ALIGNMENT 0x40000 |
75 | |
76 | /* Max number of links per event group */ |
77 | #define D40_LCLA_LINK_PER_EVENT_GRP 128 |
78 | #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP |
79 | |
80 | /* Max number of logical channels per physical channel */ |
81 | #define D40_MAX_LOG_CHAN_PER_PHY 32 |
82 | |
/* Attempts before giving up on trying to get pages that are aligned */
84 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 |
85 | |
86 | /* Bit markings for allocation map */ |
87 | #define D40_ALLOC_FREE BIT(31) |
88 | #define D40_ALLOC_PHY BIT(30) |
89 | #define D40_ALLOC_LOG_FREE 0 |
90 | |
91 | #define D40_MEMCPY_MAX_CHANS 8 |
92 | |
93 | /* Reserved event lines for memcpy only. */ |
94 | #define DB8500_DMA_MEMCPY_EV_0 51 |
95 | #define DB8500_DMA_MEMCPY_EV_1 56 |
96 | #define DB8500_DMA_MEMCPY_EV_2 57 |
97 | #define DB8500_DMA_MEMCPY_EV_3 58 |
98 | #define DB8500_DMA_MEMCPY_EV_4 59 |
99 | #define DB8500_DMA_MEMCPY_EV_5 60 |
100 | |
101 | static int dma40_memcpy_channels[] = { |
102 | DB8500_DMA_MEMCPY_EV_0, |
103 | DB8500_DMA_MEMCPY_EV_1, |
104 | DB8500_DMA_MEMCPY_EV_2, |
105 | DB8500_DMA_MEMCPY_EV_3, |
106 | DB8500_DMA_MEMCPY_EV_4, |
107 | DB8500_DMA_MEMCPY_EV_5, |
108 | }; |
109 | |
110 | /* Default configuration for physical memcpy */ |
111 | static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = { |
112 | .mode = STEDMA40_MODE_PHYSICAL, |
113 | .dir = DMA_MEM_TO_MEM, |
114 | |
115 | .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
116 | .src_info.psize = STEDMA40_PSIZE_PHY_1, |
117 | .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, |
118 | |
119 | .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
120 | .dst_info.psize = STEDMA40_PSIZE_PHY_1, |
121 | .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, |
122 | }; |
123 | |
124 | /* Default configuration for logical memcpy */ |
125 | static const struct stedma40_chan_cfg dma40_memcpy_conf_log = { |
126 | .mode = STEDMA40_MODE_LOGICAL, |
127 | .dir = DMA_MEM_TO_MEM, |
128 | |
129 | .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
130 | .src_info.psize = STEDMA40_PSIZE_LOG_1, |
131 | .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, |
132 | |
133 | .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
134 | .dst_info.psize = STEDMA40_PSIZE_LOG_1, |
135 | .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, |
136 | }; |
137 | |
138 | /** |
139 | * enum d40_command - The different commands and/or statuses. |
140 | * |
141 | * @D40_DMA_STOP: DMA channel command STOP or status STOPPED, |
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
143 | * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. |
144 | * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. |
145 | */ |
146 | enum d40_command { |
147 | D40_DMA_STOP = 0, |
148 | D40_DMA_RUN = 1, |
149 | D40_DMA_SUSPEND_REQ = 2, |
150 | D40_DMA_SUSPENDED = 3 |
151 | }; |
152 | |
153 | /* |
154 | * enum d40_events - The different Event Enables for the event lines. |
155 | * |
156 | * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan. |
157 | * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan. |
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
159 | * @D40_ROUND_EVENTLINE: Status check for event line. |
160 | */ |
161 | |
162 | enum d40_events { |
163 | D40_DEACTIVATE_EVENTLINE = 0, |
164 | D40_ACTIVATE_EVENTLINE = 1, |
165 | D40_SUSPEND_REQ_EVENTLINE = 2, |
166 | D40_ROUND_EVENTLINE = 3 |
167 | }; |
168 | |
169 | /* |
 * These are the registers that have to be saved and later restored
171 | * when the DMA hw is powered off. |
172 | * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. |
173 | */ |
174 | static __maybe_unused u32 d40_backup_regs[] = { |
175 | D40_DREG_LCPA, |
176 | D40_DREG_LCLA, |
177 | D40_DREG_PRMSE, |
178 | D40_DREG_PRMSO, |
179 | D40_DREG_PRMOE, |
180 | D40_DREG_PRMOO, |
181 | }; |
182 | |
183 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) |
184 | |
185 | /* |
 * Since 9540 and 8540 have the same HW revision,
 * use v4a for 9540 or earlier,
 * use v4b for 8540 or later.
189 | * HW revision: |
190 | * DB8500ed has revision 0 |
191 | * DB8500v1 has revision 2 |
192 | * DB8500v2 has revision 3 |
193 | * AP9540v1 has revision 4 |
194 | * DB8540v1 has revision 4 |
195 | * TODO: Check if all these registers have to be saved/restored on dma40 v4a |
196 | */ |
197 | static u32 d40_backup_regs_v4a[] = { |
198 | D40_DREG_PSEG1, |
199 | D40_DREG_PSEG2, |
200 | D40_DREG_PSEG3, |
201 | D40_DREG_PSEG4, |
202 | D40_DREG_PCEG1, |
203 | D40_DREG_PCEG2, |
204 | D40_DREG_PCEG3, |
205 | D40_DREG_PCEG4, |
206 | D40_DREG_RSEG1, |
207 | D40_DREG_RSEG2, |
208 | D40_DREG_RSEG3, |
209 | D40_DREG_RSEG4, |
210 | D40_DREG_RCEG1, |
211 | D40_DREG_RCEG2, |
212 | D40_DREG_RCEG3, |
213 | D40_DREG_RCEG4, |
214 | }; |
215 | |
216 | #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a) |
217 | |
218 | static u32 d40_backup_regs_v4b[] = { |
219 | D40_DREG_CPSEG1, |
220 | D40_DREG_CPSEG2, |
221 | D40_DREG_CPSEG3, |
222 | D40_DREG_CPSEG4, |
223 | D40_DREG_CPSEG5, |
224 | D40_DREG_CPCEG1, |
225 | D40_DREG_CPCEG2, |
226 | D40_DREG_CPCEG3, |
227 | D40_DREG_CPCEG4, |
228 | D40_DREG_CPCEG5, |
229 | D40_DREG_CRSEG1, |
230 | D40_DREG_CRSEG2, |
231 | D40_DREG_CRSEG3, |
232 | D40_DREG_CRSEG4, |
233 | D40_DREG_CRSEG5, |
234 | D40_DREG_CRCEG1, |
235 | D40_DREG_CRCEG2, |
236 | D40_DREG_CRCEG3, |
237 | D40_DREG_CRCEG4, |
238 | D40_DREG_CRCEG5, |
239 | }; |
240 | |
241 | #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) |
242 | |
243 | static __maybe_unused u32 d40_backup_regs_chan[] = { |
244 | D40_CHAN_REG_SSCFG, |
245 | D40_CHAN_REG_SSELT, |
246 | D40_CHAN_REG_SSPTR, |
247 | D40_CHAN_REG_SSLNK, |
248 | D40_CHAN_REG_SDCFG, |
249 | D40_CHAN_REG_SDELT, |
250 | D40_CHAN_REG_SDPTR, |
251 | D40_CHAN_REG_SDLNK, |
252 | }; |
253 | |
254 | #define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \ |
255 | BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B) |
256 | |
257 | /** |
258 | * struct d40_interrupt_lookup - lookup table for interrupt handler |
259 | * |
260 | * @src: Interrupt mask register. |
261 | * @clr: Interrupt clear register. |
262 | * @is_error: true if this is an error interrupt. |
263 | * @offset: start delta in the lookup_log_chans in d40_base. If equals to |
264 | * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. |
265 | */ |
266 | struct d40_interrupt_lookup { |
267 | u32 src; |
268 | u32 clr; |
269 | bool is_error; |
270 | int offset; |
271 | }; |
272 | |
273 | |
274 | static struct d40_interrupt_lookup il_v4a[] = { |
275 | {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, |
276 | {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, |
277 | {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, |
278 | {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, |
279 | {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, |
280 | {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, |
281 | {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, |
282 | {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, |
283 | {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, |
284 | {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, |
285 | }; |
286 | |
287 | static struct d40_interrupt_lookup il_v4b[] = { |
288 | {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0}, |
289 | {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32}, |
290 | {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64}, |
291 | {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96}, |
292 | {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128}, |
293 | {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0}, |
294 | {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32}, |
295 | {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64}, |
296 | {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96}, |
297 | {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128}, |
298 | {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN}, |
299 | {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN}, |
300 | }; |
301 | |
302 | /** |
303 | * struct d40_reg_val - simple lookup struct |
304 | * |
305 | * @reg: The register. |
306 | * @val: The value that belongs to the register in reg. |
307 | */ |
308 | struct d40_reg_val { |
309 | unsigned int reg; |
310 | unsigned int val; |
311 | }; |
312 | |
313 | static __initdata struct d40_reg_val dma_init_reg_v4a[] = { |
314 | /* Clock every part of the DMA block from start */ |
315 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, |
316 | |
317 | /* Interrupts on all logical channels */ |
318 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, |
319 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, |
320 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, |
321 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, |
322 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, |
323 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, |
324 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, |
325 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, |
326 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, |
327 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, |
328 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, |
329 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} |
330 | }; |
331 | static __initdata struct d40_reg_val dma_init_reg_v4b[] = { |
332 | /* Clock every part of the DMA block from start */ |
333 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, |
334 | |
335 | /* Interrupts on all logical channels */ |
336 | { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF}, |
337 | { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF}, |
338 | { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF}, |
339 | { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF}, |
340 | { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF}, |
341 | { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF}, |
342 | { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF}, |
343 | { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF}, |
344 | { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF}, |
345 | { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF}, |
346 | { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF}, |
347 | { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF}, |
348 | { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF}, |
349 | { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF}, |
350 | { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF} |
351 | }; |
352 | |
353 | /** |
354 | * struct d40_lli_pool - Structure for keeping LLIs in memory |
355 | * |
356 | * @base: Pointer to memory area when the pre_alloc_lli's are not large |
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
358 | * pre_alloc_lli is used. |
359 | * @dma_addr: DMA address, if mapped |
360 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. |
361 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, |
362 | * one buffer to one buffer. |
363 | */ |
364 | struct d40_lli_pool { |
365 | void *base; |
366 | int size; |
367 | dma_addr_t dma_addr; |
368 | /* Space for dst and src, plus an extra for padding */ |
369 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; |
370 | }; |
371 | |
372 | /** |
373 | * struct d40_desc - A descriptor is one DMA job. |
374 | * |
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
378 | * @lli_log: Same as above but for logical channels. |
379 | * @lli_pool: The pool with two entries pre-allocated. |
380 | * @lli_len: Number of llis of current descriptor. |
381 | * @lli_current: Number of transferred llis. |
382 | * @lcla_alloc: Number of LCLA entries allocated. |
 * @txd: DMA engine struct. Used, among other things, for communication
384 | * during a transfer. |
385 | * @node: List entry. |
386 | * @is_in_client_list: true if the client owns this descriptor. |
387 | * @cyclic: true if this is a cyclic job |
388 | * |
389 | * This descriptor is used for both logical and physical transfers. |
390 | */ |
391 | struct d40_desc { |
392 | /* LLI physical */ |
393 | struct d40_phy_lli_bidir lli_phy; |
394 | /* LLI logical */ |
395 | struct d40_log_lli_bidir lli_log; |
396 | |
397 | struct d40_lli_pool lli_pool; |
398 | int lli_len; |
399 | int lli_current; |
400 | int lcla_alloc; |
401 | |
402 | struct dma_async_tx_descriptor txd; |
403 | struct list_head node; |
404 | |
405 | bool is_in_client_list; |
406 | bool cyclic; |
407 | }; |
408 | |
409 | /** |
410 | * struct d40_lcla_pool - LCLA pool settings and data. |
411 | * |
412 | * @base: The virtual address of LCLA. 18 bit aligned. |
413 | * @dma_addr: DMA address, if mapped |
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
415 | * This pointer is only there for clean-up on error. |
416 | * @pages: The number of pages needed for all physical channels. |
417 | * Only used later for clean-up on error |
418 | * @lock: Lock to protect the content in this struct. |
 * @alloc_map: map showing which LCLA entry is owned by which job.
420 | */ |
421 | struct d40_lcla_pool { |
422 | void *base; |
423 | dma_addr_t dma_addr; |
424 | void *base_unaligned; |
425 | int pages; |
426 | spinlock_t lock; |
427 | struct d40_desc **alloc_map; |
428 | }; |
429 | |
430 | /** |
431 | * struct d40_phy_res - struct for handling eventlines mapped to physical |
432 | * channels. |
433 | * |
 * @lock: A lock protecting this entity.
435 | * @reserved: True if used by secure world or otherwise. |
436 | * @num: The physical channel number of this entity. |
 * @allocated_src: Bit map showing which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but for dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number.
 * @use_soft_lli: True if the channel's linked lists are managed by SW.
443 | */ |
444 | struct d40_phy_res { |
445 | spinlock_t lock; |
446 | bool reserved; |
447 | int num; |
448 | u32 allocated_src; |
449 | u32 allocated_dst; |
450 | bool use_soft_lli; |
451 | }; |
452 | |
453 | struct d40_base; |
454 | |
455 | /** |
456 | * struct d40_chan - Struct that describes a channel. |
457 | * |
458 | * @lock: A spinlock to protect this struct. |
 * @log_num: The logical number, if any, of this channel.
460 | * @pending_tx: The number of pending transfers. Used between interrupt handler |
461 | * and tasklet. |
462 | * @busy: Set to true when transfer is ongoing on this channel. |
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
465 | * @chan: DMA engine handle. |
466 | * @tasklet: Tasklet that gets scheduled from interrupt context to complete a |
467 | * transfer and call client callback. |
 * @client: Client owned descriptor list.
469 | * @pending_queue: Submitted jobs, to be issued by issue_pending() |
470 | * @active: Active descriptor. |
471 | * @done: Completed jobs |
472 | * @queue: Queued jobs. |
473 | * @prepare_queue: Prepared jobs. |
474 | * @dma_cfg: The client configuration of this dma channel. |
475 | * @slave_config: DMA slave configuration. |
476 | * @configured: whether the dma_cfg configuration is valid |
477 | * @base: Pointer to the device instance struct. |
478 | * @src_def_cfg: Default cfg register setting for src. |
479 | * @dst_def_cfg: Default cfg register setting for dst. |
480 | * @log_def: Default logical channel settings. |
481 | * @lcpa: Pointer to dst and src lcpa settings. |
482 | * @runtime_addr: runtime configured address. |
483 | * @runtime_direction: runtime configured direction. |
484 | * |
485 | * This struct can either "be" a logical or a physical channel. |
486 | */ |
487 | struct d40_chan { |
488 | spinlock_t lock; |
489 | int log_num; |
490 | int pending_tx; |
491 | bool busy; |
492 | struct d40_phy_res *phy_chan; |
493 | struct dma_chan chan; |
494 | struct tasklet_struct tasklet; |
495 | struct list_head client; |
496 | struct list_head pending_queue; |
497 | struct list_head active; |
498 | struct list_head done; |
499 | struct list_head queue; |
500 | struct list_head prepare_queue; |
501 | struct stedma40_chan_cfg dma_cfg; |
502 | struct dma_slave_config slave_config; |
503 | bool configured; |
504 | struct d40_base *base; |
505 | /* Default register configurations */ |
506 | u32 src_def_cfg; |
507 | u32 dst_def_cfg; |
508 | struct d40_def_lcsp log_def; |
509 | struct d40_log_lli_full *lcpa; |
510 | /* Runtime reconfiguration */ |
511 | dma_addr_t runtime_addr; |
512 | enum dma_transfer_direction runtime_direction; |
513 | }; |
514 | |
515 | /** |
516 | * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA |
517 | * controller |
518 | * |
519 | * @backup: the pointer to the registers address array for backup |
520 | * @backup_size: the size of the registers address array for backup |
521 | * @realtime_en: the realtime enable register |
522 | * @realtime_clear: the realtime clear register |
523 | * @high_prio_en: the high priority enable register |
524 | * @high_prio_clear: the high priority clear register |
525 | * @interrupt_en: the interrupt enable register |
526 | * @interrupt_clear: the interrupt clear register |
527 | * @il: the pointer to struct d40_interrupt_lookup |
528 | * @il_size: the size of d40_interrupt_lookup array |
529 | * @init_reg: the pointer to the struct d40_reg_val |
530 | * @init_reg_size: the size of d40_reg_val array |
531 | */ |
532 | struct d40_gen_dmac { |
533 | u32 *backup; |
534 | u32 backup_size; |
535 | u32 realtime_en; |
536 | u32 realtime_clear; |
537 | u32 high_prio_en; |
538 | u32 high_prio_clear; |
539 | u32 interrupt_en; |
540 | u32 interrupt_clear; |
541 | struct d40_interrupt_lookup *il; |
542 | u32 il_size; |
543 | struct d40_reg_val *init_reg; |
544 | u32 init_reg_size; |
545 | }; |
546 | |
547 | /** |
 * struct d40_base - The big global struct, one for each probed instance.
549 | * |
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
551 | * @execmd_lock: Lock for execute command usage since several channels share |
552 | * the same physical register. |
553 | * @dev: The device structure. |
 * @virtbase: The virtual base address of the DMA's registers.
555 | * @rev: silicon revision detected. |
556 | * @clk: Pointer to the DMA clock structure. |
557 | * @irq: The IRQ number. |
558 | * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem |
559 | * transfers). |
560 | * @num_phy_chans: The number of physical channels. Read from HW. This |
561 | * is the number of available channels for this driver, not counting "Secure |
562 | * mode" allocated physical channels. |
563 | * @num_log_chans: The number of logical channels. Calculated from |
564 | * num_phy_chans. |
565 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. |
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
568 | * @phy_chans: Room for all possible physical channels in system. |
569 | * @log_chans: Room for all possible logical channels in system. |
570 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points |
571 | * to log_chans entries. |
572 | * @lookup_phy_chans: Used to map interrupt number to physical channel. Points |
573 | * to phy_chans entries. |
574 | * @plat_data: Pointer to provided platform_data which is the driver |
575 | * configuration. |
576 | * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla. |
577 | * @phy_res: Vector containing all physical channels. |
578 | * @lcla_pool: lcla pool settings and data. |
579 | * @lcpa_base: The virtual mapped address of LCPA. |
580 | * @phy_lcpa: The physical address of the LCPA. |
581 | * @lcpa_size: The size of the LCPA area. |
582 | * @desc_slab: cache for descriptors. |
583 | * @reg_val_backup: Here the values of some hardware registers are stored |
584 | * before the DMA is powered off. They are restored when the power is back on. |
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
586 | * later |
587 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. |
588 | * @regs_interrupt: Scratch space for registers during interrupt. |
589 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. |
 * @gen_dmac: the struct for generic register values to represent u8500/8540
591 | * DMA controller |
592 | */ |
593 | struct d40_base { |
594 | spinlock_t interrupt_lock; |
595 | spinlock_t execmd_lock; |
596 | struct device *dev; |
597 | void __iomem *virtbase; |
598 | u8 rev:4; |
599 | struct clk *clk; |
600 | int irq; |
601 | int num_memcpy_chans; |
602 | int num_phy_chans; |
603 | int num_log_chans; |
604 | struct dma_device dma_both; |
605 | struct dma_device dma_slave; |
606 | struct dma_device dma_memcpy; |
607 | struct d40_chan *phy_chans; |
608 | struct d40_chan *log_chans; |
609 | struct d40_chan **lookup_log_chans; |
610 | struct d40_chan **lookup_phy_chans; |
611 | struct stedma40_platform_data *plat_data; |
612 | struct regulator *lcpa_regulator; |
613 | /* Physical half channels */ |
614 | struct d40_phy_res *phy_res; |
615 | struct d40_lcla_pool lcla_pool; |
616 | void *lcpa_base; |
617 | dma_addr_t phy_lcpa; |
618 | resource_size_t lcpa_size; |
619 | struct kmem_cache *desc_slab; |
620 | u32 reg_val_backup[BACKUP_REGS_SZ]; |
621 | u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; |
622 | u32 *reg_val_backup_chan; |
623 | u32 *regs_interrupt; |
624 | u16 gcc_pwr_off_mask; |
625 | struct d40_gen_dmac gen_dmac; |
626 | }; |
627 | |
628 | static struct device *chan2dev(struct d40_chan *d40c) |
629 | { |
630 | return &d40c->chan.dev->device; |
631 | } |
632 | |
633 | static bool chan_is_physical(struct d40_chan *chan) |
634 | { |
635 | return chan->log_num == D40_PHY_CHAN; |
636 | } |
637 | |
638 | static bool chan_is_logical(struct d40_chan *chan) |
639 | { |
640 | return !chan_is_physical(chan); |
641 | } |
642 | |
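/*
 * Each physical channel has its own bank of channel registers at
 * D40_DREG_PCBASE + num * D40_DREG_PCDELTA; the D40_CHAN_REG_* offsets
 * are relative to this per-channel base.
 */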
643 | static void __iomem *chan_base(struct d40_chan *chan) |
644 | { |
645 | return chan->base->virtbase + D40_DREG_PCBASE + |
646 | chan->phy_chan->num * D40_DREG_PCDELTA; |
647 | } |
648 | |
649 | #define d40_err(dev, format, arg...) \ |
650 | dev_err(dev, "[%s] " format, __func__, ## arg) |
651 | |
652 | #define chan_err(d40c, format, arg...) \ |
653 | d40_err(chan2dev(d40c), format, ## arg) |
654 | |
655 | static int d40_set_runtime_config_write(struct dma_chan *chan, |
656 | struct dma_slave_config *config, |
657 | enum dma_transfer_direction direction); |
658 | |
659 | static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, |
660 | int lli_len) |
661 | { |
	bool is_log = chan_is_logical(d40c);
663 | u32 align; |
664 | void *base; |
665 | |
666 | if (is_log) |
667 | align = sizeof(struct d40_log_lli); |
668 | else |
669 | align = sizeof(struct d40_phy_lli); |
670 | |
671 | if (lli_len == 1) { |
672 | base = d40d->lli_pool.pre_alloc_lli; |
673 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); |
674 | d40d->lli_pool.base = NULL; |
675 | } else { |
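		/*
		 * Room for both the src and dst LLI chains, plus one extra
		 * alignment unit so that PTR_ALIGN() below cannot step past
		 * the end of the allocation.
		 */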
676 | d40d->lli_pool.size = lli_len * 2 * align; |
677 | |
		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
679 | d40d->lli_pool.base = base; |
680 | |
681 | if (d40d->lli_pool.base == NULL) |
682 | return -ENOMEM; |
683 | } |
684 | |
685 | if (is_log) { |
686 | d40d->lli_log.src = PTR_ALIGN(base, align); |
687 | d40d->lli_log.dst = d40d->lli_log.src + lli_len; |
688 | |
689 | d40d->lli_pool.dma_addr = 0; |
690 | } else { |
691 | d40d->lli_phy.src = PTR_ALIGN(base, align); |
692 | d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; |
693 | |
694 | d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, |
695 | d40d->lli_phy.src, |
696 | d40d->lli_pool.size, |
697 | DMA_TO_DEVICE); |
698 | |
		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
702 | d40d->lli_pool.base = NULL; |
703 | d40d->lli_pool.dma_addr = 0; |
704 | return -ENOMEM; |
705 | } |
706 | } |
707 | |
708 | return 0; |
709 | } |
710 | |
711 | static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) |
712 | { |
713 | if (d40d->lli_pool.dma_addr) |
714 | dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, |
715 | d40d->lli_pool.size, DMA_TO_DEVICE); |
716 | |
	kfree(d40d->lli_pool.base);
718 | d40d->lli_pool.base = NULL; |
719 | d40d->lli_pool.size = 0; |
720 | d40d->lli_log.src = NULL; |
721 | d40d->lli_log.dst = NULL; |
722 | d40d->lli_phy.src = NULL; |
723 | d40d->lli_phy.dst = NULL; |
724 | } |
725 | |
726 | static int d40_lcla_alloc_one(struct d40_chan *d40c, |
727 | struct d40_desc *d40d) |
728 | { |
729 | unsigned long flags; |
730 | int i; |
731 | int ret = -EINVAL; |
732 | |
733 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
734 | |
735 | /* |
	 * Allocate both src and dst at the same time; therefore each half
	 * starts at 1, since 0 can't be used as it is the end marker.
738 | */ |
739 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { |
740 | int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; |
741 | |
742 | if (!d40c->base->lcla_pool.alloc_map[idx]) { |
743 | d40c->base->lcla_pool.alloc_map[idx] = d40d; |
744 | d40d->lcla_alloc++; |
745 | ret = i; |
746 | break; |
747 | } |
748 | } |
749 | |
	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
751 | |
752 | return ret; |
753 | } |
754 | |
755 | static int d40_lcla_free_all(struct d40_chan *d40c, |
756 | struct d40_desc *d40d) |
757 | { |
758 | unsigned long flags; |
759 | int i; |
760 | int ret = -EINVAL; |
761 | |
	if (chan_is_physical(d40c))
763 | return 0; |
764 | |
765 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
766 | |
767 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { |
768 | int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; |
769 | |
770 | if (d40c->base->lcla_pool.alloc_map[idx] == d40d) { |
771 | d40c->base->lcla_pool.alloc_map[idx] = NULL; |
772 | d40d->lcla_alloc--; |
773 | if (d40d->lcla_alloc == 0) { |
774 | ret = 0; |
775 | break; |
776 | } |
777 | } |
778 | } |
779 | |
	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
781 | |
782 | return ret; |
783 | |
784 | } |
785 | |
786 | static void d40_desc_remove(struct d40_desc *d40d) |
787 | { |
	list_del(&d40d->node);
789 | } |
790 | |
791 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) |
792 | { |
793 | struct d40_desc *desc = NULL; |
794 | |
	if (!list_empty(&d40c->client)) {
796 | struct d40_desc *d; |
797 | struct d40_desc *_d; |
798 | |
799 | list_for_each_entry_safe(d, _d, &d40c->client, node) { |
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
802 | desc = d; |
803 | memset(desc, 0, sizeof(*desc)); |
804 | break; |
805 | } |
806 | } |
807 | } |
808 | |
809 | if (!desc) |
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
811 | |
812 | if (desc) |
		INIT_LIST_HEAD(&desc->node);
814 | |
815 | return desc; |
816 | } |
817 | |
818 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) |
819 | { |
821 | d40_pool_lli_free(d40c, d40d); |
822 | d40_lcla_free_all(d40c, d40d); |
	kmem_cache_free(d40c->base->desc_slab, d40d);
824 | } |
825 | |
826 | static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) |
827 | { |
	list_add_tail(&desc->node, &d40c->active);
829 | } |
830 | |
831 | static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) |
832 | { |
833 | struct d40_phy_lli *lli_dst = desc->lli_phy.dst; |
834 | struct d40_phy_lli *lli_src = desc->lli_phy.src; |
835 | void __iomem *base = chan_base(chan); |
836 | |
	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
846 | } |
847 | |
848 | static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc) |
849 | { |
	list_add_tail(&desc->node, &d40c->done);
851 | } |
852 | |
853 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) |
854 | { |
855 | struct d40_lcla_pool *pool = &chan->base->lcla_pool; |
856 | struct d40_log_lli_bidir *lli = &desc->lli_log; |
857 | int lli_current = desc->lli_current; |
858 | int lli_len = desc->lli_len; |
859 | bool cyclic = desc->cyclic; |
860 | int curr_lcla = -EINVAL; |
861 | int first_lcla = 0; |
862 | bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; |
863 | bool linkback; |
864 | |
865 | /* |
	 * We may have partially running cyclic transfers, in case we didn't get
867 | * enough LCLA entries. |
868 | */ |
869 | linkback = cyclic && lli_current == 0; |
870 | |
871 | /* |
872 | * For linkback, we need one LCLA even with only one link, because we |
873 | * can't link back to the one in LCPA space |
874 | */ |
875 | if (linkback || (lli_len - lli_current > 1)) { |
876 | /* |
877 | * If the channel is expected to use only soft_lli don't |
878 | * allocate a lcla. This is to avoid a HW issue that exists |
		 * in some controllers during a peripheral to memory transfer
880 | * that uses linked lists. |
881 | */ |
882 | if (!(chan->phy_chan->use_soft_lli && |
883 | chan->dma_cfg.dir == DMA_DEV_TO_MEM)) |
			curr_lcla = d40_lcla_alloc_one(chan, desc);
885 | |
886 | first_lcla = curr_lcla; |
887 | } |
888 | |
889 | /* |
890 | * For linkback, we normally load the LCPA in the loop since we need to |
891 | * link it to the second LCLA and not the first. However, if we |
892 | * couldn't even get a first LCLA, then we have to run in LCPA and |
893 | * reload manually. |
894 | */ |
895 | if (!linkback || curr_lcla == -EINVAL) { |
896 | unsigned int flags = 0; |
897 | |
898 | if (curr_lcla == -EINVAL) |
899 | flags |= LLI_TERM_INT; |
900 | |
		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
906 | lli_current++; |
907 | } |
908 | |
909 | if (curr_lcla < 0) |
910 | goto set_current; |
911 | |
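	/*
	 * Each physical channel owns a 1 KiB slice of the LCLA area; link
	 * slot i of that slice holds a src/dst pair of 8-byte struct
	 * d40_log_lli entries, hence the "num * 1024 + 8 * curr_lcla * 2"
	 * offset below.
	 */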
912 | for (; lli_current < lli_len; lli_current++) { |
913 | unsigned int lcla_offset = chan->phy_chan->num * 1024 + |
914 | 8 * curr_lcla * 2; |
915 | struct d40_log_lli *lcla = pool->base + lcla_offset; |
916 | unsigned int flags = 0; |
917 | int next_lcla; |
918 | |
919 | if (lli_current + 1 < lli_len) |
			next_lcla = d40_lcla_alloc_one(chan, desc);
921 | else |
922 | next_lcla = linkback ? first_lcla : -EINVAL; |
923 | |
924 | if (cyclic || next_lcla == -EINVAL) |
925 | flags |= LLI_TERM_INT; |
926 | |
927 | if (linkback && curr_lcla == first_lcla) { |
928 | /* First link goes in both LCPA and LCLA */ |
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
933 | } |
934 | |
935 | /* |
936 | * One unused LCLA in the cyclic case if the very first |
937 | * next_lcla fails... |
938 | */ |
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);
943 | |
944 | /* |
945 | * Cache maintenance is not needed if lcla is |
946 | * mapped in esram |
947 | */ |
948 | if (!use_esram_lcla) { |
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
953 | } |
954 | curr_lcla = next_lcla; |
955 | |
956 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { |
957 | lli_current++; |
958 | break; |
959 | } |
960 | } |
961 | set_current: |
962 | desc->lli_current = lli_current; |
963 | } |
964 | |
965 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) |
966 | { |
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
972 | } |
973 | |
974 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) |
975 | { |
976 | return list_first_entry_or_null(&d40c->active, struct d40_desc, node); |
977 | } |
978 | |
979 | /* remove desc from current queue and add it to the pending_queue */ |
980 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) |
981 | { |
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
985 | } |
986 | |
987 | static struct d40_desc *d40_first_pending(struct d40_chan *d40c) |
988 | { |
989 | return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc, |
990 | node); |
991 | } |
992 | |
993 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) |
994 | { |
995 | return list_first_entry_or_null(&d40c->queue, struct d40_desc, node); |
996 | } |
997 | |
998 | static struct d40_desc *d40_first_done(struct d40_chan *d40c) |
999 | { |
1000 | return list_first_entry_or_null(&d40c->done, struct d40_desc, node); |
1001 | } |
1002 | |
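/*
 * The psize encodings translate to bursts of 2 << psize elements, except
 * for the *_PSIZE_*_1 encodings, which mean a burst of a single element.
 */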
1003 | static int d40_psize_2_burst_size(bool is_log, int psize) |
1004 | { |
1005 | if (is_log) { |
1006 | if (psize == STEDMA40_PSIZE_LOG_1) |
1007 | return 1; |
1008 | } else { |
1009 | if (psize == STEDMA40_PSIZE_PHY_1) |
1010 | return 1; |
1011 | } |
1012 | |
1013 | return 2 << psize; |
1014 | } |
1015 | |
1016 | /* |
 * The dma only supports transmitting packets up to
 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
1019 | * |
1020 | * Calculate the total number of dma elements required to send the entire sg list. |
1021 | */ |
1022 | static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) |
1023 | { |
1024 | int dmalen; |
1025 | u32 max_w = max(data_width1, data_width2); |
1026 | u32 min_w = min(data_width1, data_width2); |
1027 | u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); |
1028 | |
1029 | if (seg_max > STEDMA40_MAX_SEG_SIZE) |
1030 | seg_max -= max_w; |
1031 | |
1032 | if (!IS_ALIGNED(size, max_w)) |
1033 | return -EINVAL; |
1034 | |
1035 | if (size <= seg_max) |
1036 | dmalen = 1; |
1037 | else { |
1038 | dmalen = size / seg_max; |
1039 | if (dmalen * seg_max < size) |
1040 | dmalen++; |
1041 | } |
1042 | return dmalen; |
1043 | } |
1044 | |
1045 | static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, |
1046 | u32 data_width1, u32 data_width2) |
1047 | { |
1048 | struct scatterlist *sg; |
1049 | int i; |
1050 | int len = 0; |
1051 | int ret; |
1052 | |
1053 | for_each_sg(sgl, sg, sg_len, i) { |
1054 | ret = d40_size_2_dmalen(sg_dma_len(sg), |
1055 | data_width1, data_width2); |
1056 | if (ret < 0) |
1057 | return ret; |
1058 | len += ret; |
1059 | } |
1060 | return len; |
1061 | } |
1062 | |
1063 | static int __d40_execute_command_phy(struct d40_chan *d40c, |
1064 | enum d40_command command) |
1065 | { |
1066 | u32 status; |
1067 | int i; |
1068 | void __iomem *active_reg; |
1069 | int ret = 0; |
1070 | unsigned long flags; |
1071 | u32 wmask; |
1072 | |
1073 | if (command == D40_DMA_STOP) { |
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1075 | if (ret) |
1076 | return ret; |
1077 | } |
1078 | |
1079 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); |
1080 | |
1081 | if (d40c->phy_chan->num % 2 == 0) |
1082 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
1083 | else |
1084 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; |
1085 | |
1086 | if (command == D40_DMA_SUSPEND_REQ) { |
		status = (readl(active_reg) &
1088 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> |
1089 | D40_CHAN_POS(d40c->phy_chan->num); |
1090 | |
1091 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) |
1092 | goto unlock; |
1093 | } |
1094 | |
1095 | wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); |
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);
1098 | |
1099 | if (command == D40_DMA_SUSPEND_REQ) { |
1100 | |
1101 | for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { |
			status = (readl(active_reg) &
1103 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> |
1104 | D40_CHAN_POS(d40c->phy_chan->num); |
1105 | |
1106 | cpu_relax(); |
1107 | /* |
1108 | * Reduce the number of bus accesses while |
1109 | * waiting for the DMA to suspend. |
1110 | */ |
1111 | udelay(3); |
1112 | |
1113 | if (status == D40_DMA_STOP || |
1114 | status == D40_DMA_SUSPENDED) |
1115 | break; |
1116 | } |
1117 | |
1118 | if (i == D40_SUSPEND_MAX_IT) { |
1119 | chan_err(d40c, |
1120 | "unable to suspend the chl %d (log: %d) status %x\n" , |
1121 | d40c->phy_chan->num, d40c->log_num, |
1122 | status); |
1123 | dump_stack(); |
1124 | ret = -EBUSY; |
1125 | } |
1126 | |
1127 | } |
1128 | unlock: |
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1130 | return ret; |
1131 | } |
1132 | |
1133 | static void d40_term_all(struct d40_chan *d40c) |
1134 | { |
1135 | struct d40_desc *d40d; |
1136 | struct d40_desc *_d; |
1137 | |
1138 | /* Release completed descriptors */ |
1139 | while ((d40d = d40_first_done(d40c))) { |
1140 | d40_desc_remove(d40d); |
1141 | d40_desc_free(d40c, d40d); |
1142 | } |
1143 | |
1144 | /* Release active descriptors */ |
1145 | while ((d40d = d40_first_active_get(d40c))) { |
1146 | d40_desc_remove(d40d); |
1147 | d40_desc_free(d40c, d40d); |
1148 | } |
1149 | |
1150 | /* Release queued descriptors waiting for transfer */ |
1151 | while ((d40d = d40_first_queued(d40c))) { |
1152 | d40_desc_remove(d40d); |
1153 | d40_desc_free(d40c, d40d); |
1154 | } |
1155 | |
1156 | /* Release pending descriptors */ |
1157 | while ((d40d = d40_first_pending(d40c))) { |
1158 | d40_desc_remove(d40d); |
1159 | d40_desc_free(d40c, d40d); |
1160 | } |
1161 | |
1162 | /* Release client owned descriptors */ |
	if (!list_empty(&d40c->client))
1164 | list_for_each_entry_safe(d40d, _d, &d40c->client, node) { |
1165 | d40_desc_remove(d40d); |
1166 | d40_desc_free(d40c, d40d); |
1167 | } |
1168 | |
1169 | /* Release descriptors in prepare queue */ |
	if (!list_empty(&d40c->prepare_queue))
1171 | list_for_each_entry_safe(d40d, _d, |
1172 | &d40c->prepare_queue, node) { |
1173 | d40_desc_remove(d40d); |
1174 | d40_desc_free(d40c, d40d); |
1175 | } |
1176 | |
1177 | d40c->pending_tx = 0; |
1178 | } |
1179 | |
1180 | static void __d40_config_set_event(struct d40_chan *d40c, |
1181 | enum d40_events event_type, u32 event, |
1182 | int reg) |
1183 | { |
	void __iomem *addr = chan_base(d40c) + reg;
1185 | int tries; |
1186 | u32 status; |
1187 | |
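	/*
	 * The writels below place the 2-bit command in the target event's
	 * field and write all-ones (the D40_ROUND_EVENTLINE code) to every
	 * other event position, which is expected to leave those event
	 * lines unchanged, so no read-modify-write cycle is needed.
	 */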
1188 | switch (event_type) { |
1189 | |
1190 | case D40_DEACTIVATE_EVENTLINE: |
1191 | |
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1193 | | ~D40_EVENTLINE_MASK(event), addr); |
1194 | break; |
1195 | |
1196 | case D40_SUSPEND_REQ_EVENTLINE: |
1197 | status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> |
1198 | D40_EVENTLINE_POS(event); |
1199 | |
1200 | if (status == D40_DEACTIVATE_EVENTLINE || |
1201 | status == D40_SUSPEND_REQ_EVENTLINE) |
1202 | break; |
1203 | |
		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1205 | | ~D40_EVENTLINE_MASK(event), addr); |
1206 | |
1207 | for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) { |
1208 | |
1209 | status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> |
1210 | D40_EVENTLINE_POS(event); |
1211 | |
1212 | cpu_relax(); |
1213 | /* |
1214 | * Reduce the number of bus accesses while |
1215 | * waiting for the DMA to suspend. |
1216 | */ |
1217 | udelay(3); |
1218 | |
1219 | if (status == D40_DEACTIVATE_EVENTLINE) |
1220 | break; |
1221 | } |
1222 | |
1223 | if (tries == D40_SUSPEND_MAX_IT) { |
1224 | chan_err(d40c, |
1225 | "unable to stop the event_line chl %d (log: %d)" |
1226 | "status %x\n" , d40c->phy_chan->num, |
1227 | d40c->log_num, status); |
1228 | } |
1229 | break; |
1230 | |
1231 | case D40_ACTIVATE_EVENTLINE: |
1232 | /* |
1233 | * The hardware sometimes doesn't register the enable when src and dst |
1234 | * event lines are active on the same logical channel. Retry to ensure |
1235 | * it does. Usually only one retry is sufficient. |
1236 | */ |
1237 | tries = 100; |
1238 | while (--tries) { |
			writel((D40_ACTIVATE_EVENTLINE <<
1240 | D40_EVENTLINE_POS(event)) | |
1241 | ~D40_EVENTLINE_MASK(event), addr); |
1242 | |
1243 | if (readl(addr) & D40_EVENTLINE_MASK(event)) |
1244 | break; |
1245 | } |
1246 | |
1247 | if (tries != 99) |
1248 | dev_dbg(chan2dev(d40c), |
1249 | "[%s] workaround enable S%cLNK (%d tries)\n" , |
1250 | __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', |
1251 | 100 - tries); |
1252 | |
1253 | WARN_ON(!tries); |
1254 | break; |
1255 | |
1256 | case D40_ROUND_EVENTLINE: |
1257 | BUG(); |
1258 | break; |
1259 | |
1260 | } |
1261 | } |
1262 | |
1263 | static void d40_config_set_event(struct d40_chan *d40c, |
1264 | enum d40_events event_type) |
1265 | { |
1266 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); |
1267 | |
1268 | /* Enable event line connected to device (or memcpy) */ |
1269 | if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || |
1270 | (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) |
1271 | __d40_config_set_event(d40c, event_type, event, |
1272 | D40_CHAN_REG_SSLNK); |
1273 | |
1274 | if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) |
1275 | __d40_config_set_event(d40c, event_type, event, |
1276 | D40_CHAN_REG_SDLNK); |
1277 | } |
1278 | |
1279 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
1280 | { |
	void __iomem *chanbase = chan_base(d40c);
1282 | u32 val; |
1283 | |
	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1286 | |
1287 | return val; |
1288 | } |
1289 | |
1290 | static int |
1291 | __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command) |
1292 | { |
1293 | unsigned long flags; |
1294 | int ret = 0; |
1295 | u32 active_status; |
1296 | void __iomem *active_reg; |
1297 | |
1298 | if (d40c->phy_chan->num % 2 == 0) |
1299 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
1300 | else |
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1305 | |
1306 | switch (command) { |
1307 | case D40_DMA_STOP: |
1308 | case D40_DMA_SUSPEND_REQ: |
1309 | |
		active_status = (readl(active_reg) &
1311 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> |
1312 | D40_CHAN_POS(d40c->phy_chan->num); |
1313 | |
1314 | if (active_status == D40_DMA_RUN) |
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1318 | |
1319 | if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP)) |
1320 | ret = __d40_execute_command_phy(d40c, command); |
1321 | |
1322 | break; |
1323 | |
1324 | case D40_DMA_RUN: |
1325 | |
		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1327 | ret = __d40_execute_command_phy(d40c, command); |
1328 | break; |
1329 | |
1330 | case D40_DMA_SUSPENDED: |
1331 | BUG(); |
1332 | break; |
1333 | } |
1334 | |
	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1336 | return ret; |
1337 | } |
1338 | |
1339 | static int d40_channel_execute_command(struct d40_chan *d40c, |
1340 | enum d40_command command) |
1341 | { |
	if (chan_is_logical(d40c))
1343 | return __d40_execute_command_log(d40c, command); |
1344 | else |
1345 | return __d40_execute_command_phy(d40c, command); |
1346 | } |
1347 | |
1348 | static u32 d40_get_prmo(struct d40_chan *d40c) |
1349 | { |
1350 | static const unsigned int phy_map[] = { |
1351 | [STEDMA40_PCHAN_BASIC_MODE] |
1352 | = D40_DREG_PRMO_PCHAN_BASIC, |
1353 | [STEDMA40_PCHAN_MODULO_MODE] |
1354 | = D40_DREG_PRMO_PCHAN_MODULO, |
1355 | [STEDMA40_PCHAN_DOUBLE_DST_MODE] |
1356 | = D40_DREG_PRMO_PCHAN_DOUBLE_DST, |
1357 | }; |
1358 | static const unsigned int log_map[] = { |
1359 | [STEDMA40_LCHAN_SRC_PHY_DST_LOG] |
1360 | = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG, |
1361 | [STEDMA40_LCHAN_SRC_LOG_DST_PHY] |
1362 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY, |
1363 | [STEDMA40_LCHAN_SRC_LOG_DST_LOG] |
1364 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, |
1365 | }; |
1366 | |
	if (chan_is_physical(d40c))
1368 | return phy_map[d40c->dma_cfg.mode_opt]; |
1369 | else |
1370 | return log_map[d40c->dma_cfg.mode_opt]; |
1371 | } |
1372 | |
1373 | static void d40_config_write(struct d40_chan *d40c) |
1374 | { |
1375 | u32 addr_base; |
1376 | u32 var; |
1377 | |
1378 | /* Odd addresses are even addresses + 4 */ |
1379 | addr_base = (d40c->phy_chan->num % 2) * 4; |
1380 | /* Setup channel mode to logical or physical */ |
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
1382 | D40_CHAN_POS(d40c->phy_chan->num); |
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1384 | |
1385 | /* Setup operational mode option register */ |
1386 | var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); |
1387 | |
	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1389 | |
	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			& D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
1406 | } |
1407 | } |
1408 | |
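/*
 * Bytes left to transfer: the ECNT field holds the number of elements
 * still to be transferred, each element being data_width bytes wide.
 */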
1409 | static u32 d40_residue(struct d40_chan *d40c) |
1410 | { |
1411 | u32 num_elt; |
1412 | |
	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1418 | num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) |
1419 | >> D40_SREG_ELEM_PHY_ECNT_POS; |
1420 | } |
1421 | |
1422 | return num_elt * d40c->dma_cfg.dst_info.data_width; |
1423 | } |
1424 | |
1425 | static bool d40_tx_is_linked(struct d40_chan *d40c) |
1426 | { |
1427 | bool is_link; |
1428 | |
	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1433 | & D40_SREG_LNK_PHYS_LNK_MASK; |
1434 | |
1435 | return is_link; |
1436 | } |
1437 | |
1438 | static int d40_pause(struct dma_chan *chan) |
1439 | { |
1440 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
1441 | int res = 0; |
1442 | unsigned long flags; |
1443 | |
1444 | if (d40c->phy_chan == NULL) { |
1445 | chan_err(d40c, "Channel is not allocated!\n" ); |
1446 | return -EINVAL; |
1447 | } |
1448 | |
1449 | if (!d40c->busy) |
1450 | return 0; |
1451 | |
1452 | spin_lock_irqsave(&d40c->lock, flags); |
	pm_runtime_get_sync(d40c->base->dev);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
1460 | return res; |
1461 | } |
1462 | |
1463 | static int d40_resume(struct dma_chan *chan) |
1464 | { |
1465 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
1466 | int res = 0; |
1467 | unsigned long flags; |
1468 | |
1469 | if (d40c->phy_chan == NULL) { |
1470 | chan_err(d40c, "Channel is not allocated!\n" ); |
1471 | return -EINVAL; |
1472 | } |
1473 | |
1474 | if (!d40c->busy) |
1475 | return 0; |
1476 | |
1477 | spin_lock_irqsave(&d40c->lock, flags); |
	pm_runtime_get_sync(d40c->base->dev);
1479 | |
1480 | /* If bytes left to transfer or linked tx resume job */ |
1481 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) |
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
1487 | return res; |
1488 | } |
1489 | |
1490 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
1491 | { |
1492 | struct d40_chan *d40c = container_of(tx->chan, |
1493 | struct d40_chan, |
1494 | chan); |
1495 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
1496 | unsigned long flags; |
1497 | dma_cookie_t cookie; |
1498 | |
1499 | spin_lock_irqsave(&d40c->lock, flags); |
1500 | cookie = dma_cookie_assign(tx); |
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);
1503 | |
1504 | return cookie; |
1505 | } |
1506 | |
1507 | static int d40_start(struct d40_chan *d40c) |
1508 | { |
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
1510 | } |
1511 | |
1512 | static struct d40_desc *d40_queue_start(struct d40_chan *d40c) |
1513 | { |
1514 | struct d40_desc *d40d; |
1515 | int err; |
1516 | |
1517 | /* Start queued jobs, if any */ |
1518 | d40d = d40_first_queued(d40c); |
1519 | |
1520 | if (d40d != NULL) { |
1521 | if (!d40c->busy) { |
1522 | d40c->busy = true; |
			pm_runtime_get_sync(d40c->base->dev);
1524 | } |
1525 | |
1526 | /* Remove from queue */ |
1527 | d40_desc_remove(d40d); |
1528 | |
1529 | /* Add to active queue */ |
		d40_desc_submit(d40c, d40d);
1531 | |
1532 | /* Initiate DMA job */ |
1533 | d40_desc_load(d40c, d40d); |
1534 | |
1535 | /* Start dma job */ |
1536 | err = d40_start(d40c); |
1537 | |
1538 | if (err) |
1539 | return NULL; |
1540 | } |
1541 | |
1542 | return d40d; |
1543 | } |
1544 | |
1545 | /* called from interrupt context */ |
1546 | static void dma_tc_handle(struct d40_chan *d40c) |
1547 | { |
1548 | struct d40_desc *d40d; |
1549 | |
1550 | /* Get first active entry from list */ |
1551 | d40d = d40_first_active_get(d40c); |
1552 | |
1553 | if (d40d == NULL) |
1554 | return; |
1555 | |
1556 | if (d40d->cyclic) { |
1557 | /* |
		 * If this was a partially loaded list, we need to reload
1559 | * it, and only when the list is completed. We need to check |
1560 | * for done because the interrupt will hit for every link, and |
1561 | * not just the last one. |
1562 | */ |
1563 | if (d40d->lli_current < d40d->lli_len |
1564 | && !d40_tx_is_linked(d40c) |
1565 | && !d40_residue(d40c)) { |
1566 | d40_lcla_free_all(d40c, d40d); |
1567 | d40_desc_load(d40c, d40d); |
1568 | (void) d40_start(d40c); |
1569 | |
1570 | if (d40d->lli_current == d40d->lli_len) |
1571 | d40d->lli_current = 0; |
1572 | } |
1573 | } else { |
1574 | d40_lcla_free_all(d40c, d40d); |
1575 | |
1576 | if (d40d->lli_current < d40d->lli_len) { |
1577 | d40_desc_load(d40c, d40d); |
1578 | /* Start dma job */ |
1579 | (void) d40_start(d40c); |
1580 | return; |
1581 | } |
1582 | |
1583 | if (d40_queue_start(d40c) == NULL) { |
1584 | d40c->busy = false; |
1585 | |
			pm_runtime_mark_last_busy(d40c->base->dev);
			pm_runtime_put_autosuspend(d40c->base->dev);
1588 | } |
1589 | |
1590 | d40_desc_remove(d40d); |
		d40_desc_done(d40c, d40d);
1592 | } |
1593 | |
1594 | d40c->pending_tx++; |
	tasklet_schedule(&d40c->tasklet);
1596 | |
1597 | } |
1598 | |
1599 | static void dma_tasklet(struct tasklet_struct *t) |
1600 | { |
1601 | struct d40_chan *d40c = from_tasklet(d40c, t, tasklet); |
1602 | struct d40_desc *d40d; |
1603 | unsigned long flags; |
1604 | bool callback_active; |
1605 | struct dmaengine_desc_callback cb; |
1606 | |
1607 | spin_lock_irqsave(&d40c->lock, flags); |
1608 | |
1609 | /* Get first entry from the done list */ |
1610 | d40d = d40_first_done(d40c); |
1611 | if (d40d == NULL) { |
		/* Check if we have reached here for a cyclic job */
1613 | d40d = d40_first_active_get(d40c); |
1614 | if (d40d == NULL || !d40d->cyclic) |
1615 | goto check_pending_tx; |
1616 | } |
1617 | |
1618 | if (!d40d->cyclic) |
		dma_cookie_complete(&d40d->txd);
1620 | |
1621 | /* |
1622 | * If terminating a channel pending_tx is set to zero. |
	 * This prevents any finished active jobs from returning to the client.
1624 | */ |
1625 | if (d40c->pending_tx == 0) { |
		spin_unlock_irqrestore(&d40c->lock, flags);
1627 | return; |
1628 | } |
1629 | |
1630 | /* Callback to client */ |
1631 | callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); |
	dmaengine_desc_get_callback(&d40d->txd, &cb);
1633 | |
1634 | if (!d40d->cyclic) { |
1635 | if (async_tx_test_ack(tx: &d40d->txd)) { |
1636 | d40_desc_remove(d40d); |
1637 | d40_desc_free(d40c, d40d); |
1638 | } else if (!d40d->is_in_client_list) { |
1639 | d40_desc_remove(d40d); |
1640 | d40_lcla_free_all(d40c, d40d); |
			list_add_tail(&d40d->node, &d40c->client);
1642 | d40d->is_in_client_list = true; |
1643 | } |
1644 | } |
1645 | |
1646 | d40c->pending_tx--; |
1647 | |
1648 | if (d40c->pending_tx) |
		tasklet_schedule(&d40c->tasklet);
1650 | |
	spin_unlock_irqrestore(&d40c->lock, flags);
1652 | |
1653 | if (callback_active) |
		dmaengine_desc_callback_invoke(&cb, NULL);
1655 | |
1656 | return; |
1657 | check_pending_tx: |
	/* Rescue maneuver if receiving double interrupts */
1659 | if (d40c->pending_tx > 0) |
1660 | d40c->pending_tx--; |
	spin_unlock_irqrestore(&d40c->lock, flags);
1662 | } |
1663 | |
1664 | static irqreturn_t d40_handle_interrupt(int irq, void *data) |
1665 | { |
1666 | int i; |
1667 | u32 idx; |
1668 | u32 row; |
1669 | long chan = -1; |
1670 | struct d40_chan *d40c; |
1671 | struct d40_base *base = data; |
1672 | u32 *regs = base->regs_interrupt; |
1673 | struct d40_interrupt_lookup *il = base->gen_dmac.il; |
1674 | u32 il_size = base->gen_dmac.il_size; |
1675 | |
	spin_lock(&base->interrupt_lock);
1677 | |
1678 | /* Read interrupt status of both logical and physical channels */ |
1679 | for (i = 0; i < il_size; i++) |
		regs[i] = readl(base->virtbase + il[i].src);
1681 | |
1682 | for (;;) { |
1683 | |
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);
1686 | |
1687 | /* No more set bits found? */ |
1688 | if (chan == BITS_PER_LONG * il_size) |
1689 | break; |
1690 | |
1691 | row = chan / BITS_PER_LONG; |
1692 | idx = chan & (BITS_PER_LONG - 1); |
1693 | |
1694 | if (il[row].offset == D40_PHY_CHAN) |
1695 | d40c = base->lookup_phy_chans[idx]; |
1696 | else |
1697 | d40c = base->lookup_log_chans[il[row].offset + idx]; |
1698 | |
1699 | if (!d40c) { |
1700 | /* |
1701 | * No error because this can happen if something else |
1702 | * in the system is using the channel. |
1703 | */ |
1704 | continue; |
1705 | } |
1706 | |
1707 | /* ACK interrupt */ |
		writel(BIT(idx), base->virtbase + il[row].clr);
1709 | |
		spin_lock(&d40c->lock);
1711 | |
1712 | if (!il[row].is_error) |
1713 | dma_tc_handle(d40c); |
1714 | else |
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);
1717 | |
		spin_unlock(&d40c->lock);
1719 | } |
1720 | |
	spin_unlock(&base->interrupt_lock);
1722 | |
1723 | return IRQ_HANDLED; |
1724 | } |
1725 | |
1726 | static int d40_validate_conf(struct d40_chan *d40c, |
1727 | struct stedma40_chan_cfg *conf) |
1728 | { |
1729 | int res = 0; |
1730 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; |
1731 | |
1732 | if (!conf->dir) { |
		chan_err(d40c, "Invalid direction.\n");
1734 | res = -EINVAL; |
1735 | } |
1736 | |
1737 | if ((is_log && conf->dev_type > d40c->base->num_log_chans) || |
1738 | (!is_log && conf->dev_type > d40c->base->num_phy_chans) || |
1739 | (conf->dev_type < 0)) { |
		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1741 | res = -EINVAL; |
1742 | } |
1743 | |
1744 | if (conf->dir == DMA_DEV_TO_DEV) { |
1745 | /* |
1746 | * DMAC HW supports it. Will be added to this driver, |
1747 | * in case any dma client requires it. |
1748 | */ |
1749 | chan_err(d40c, "periph to periph not supported\n" ); |
1750 | res = -EINVAL; |
1751 | } |
1752 | |
	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    conf->src_info.data_width !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    conf->dst_info.data_width) {
1757 | /* |
1758 | * The DMAC hardware only supports |
1759 | * src (burst x width) == dst (burst x width) |
1760 | */ |
1761 | |
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1763 | res = -EINVAL; |
1764 | } |
1765 | |
1766 | return res; |
1767 | } |
1768 | |
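/*
 * Each physical channel tracks its source and destination halves in a
 * small allocation bitmask: D40_ALLOC_FREE when unused, D40_ALLOC_PHY
 * when claimed as a whole physical channel, or a set of BIT(event_line)
 * bits when the half is shared between logical channels.
 */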
1769 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, |
1770 | bool is_src, int log_event_line, bool is_log, |
1771 | bool *first_user) |
1772 | { |
1773 | unsigned long flags; |
1774 | spin_lock_irqsave(&phy->lock, flags); |
1775 | |
1776 | *first_user = ((phy->allocated_src | phy->allocated_dst) |
1777 | == D40_ALLOC_FREE); |
1778 | |
1779 | if (!is_log) { |
1780 | /* Physical interrupts are masked per physical full channel */ |
1781 | if (phy->allocated_src == D40_ALLOC_FREE && |
1782 | phy->allocated_dst == D40_ALLOC_FREE) { |
1783 | phy->allocated_dst = D40_ALLOC_PHY; |
1784 | phy->allocated_src = D40_ALLOC_PHY; |
1785 | goto found_unlock; |
1786 | } else |
1787 | goto not_found_unlock; |
1788 | } |
1789 | |
1790 | /* Logical channel */ |
1791 | if (is_src) { |
1792 | if (phy->allocated_src == D40_ALLOC_PHY) |
1793 | goto not_found_unlock; |
1794 | |
1795 | if (phy->allocated_src == D40_ALLOC_FREE) |
1796 | phy->allocated_src = D40_ALLOC_LOG_FREE; |
1797 | |
1798 | if (!(phy->allocated_src & BIT(log_event_line))) { |
1799 | phy->allocated_src |= BIT(log_event_line); |
1800 | goto found_unlock; |
1801 | } else |
1802 | goto not_found_unlock; |
1803 | } else { |
1804 | if (phy->allocated_dst == D40_ALLOC_PHY) |
1805 | goto not_found_unlock; |
1806 | |
1807 | if (phy->allocated_dst == D40_ALLOC_FREE) |
1808 | phy->allocated_dst = D40_ALLOC_LOG_FREE; |
1809 | |
1810 | if (!(phy->allocated_dst & BIT(log_event_line))) { |
1811 | phy->allocated_dst |= BIT(log_event_line); |
1812 | goto found_unlock; |
1813 | } |
1814 | } |
1815 | not_found_unlock: |
	spin_unlock_irqrestore(&phy->lock, flags);
1817 | return false; |
1818 | found_unlock: |
	spin_unlock_irqrestore(&phy->lock, flags);
1820 | return true; |
1821 | } |
1822 | |
1823 | static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, |
1824 | int log_event_line) |
1825 | { |
1826 | unsigned long flags; |
1827 | bool is_free = false; |
1828 | |
1829 | spin_lock_irqsave(&phy->lock, flags); |
1830 | if (!log_event_line) { |
1831 | phy->allocated_dst = D40_ALLOC_FREE; |
1832 | phy->allocated_src = D40_ALLOC_FREE; |
1833 | is_free = true; |
1834 | goto unlock; |
1835 | } |
1836 | |
1837 | /* Logical channel */ |
1838 | if (is_src) { |
1839 | phy->allocated_src &= ~BIT(log_event_line); |
1840 | if (phy->allocated_src == D40_ALLOC_LOG_FREE) |
1841 | phy->allocated_src = D40_ALLOC_FREE; |
1842 | } else { |
1843 | phy->allocated_dst &= ~BIT(log_event_line); |
1844 | if (phy->allocated_dst == D40_ALLOC_LOG_FREE) |
1845 | phy->allocated_dst = D40_ALLOC_FREE; |
1846 | } |
1847 | |
1848 | is_free = ((phy->allocated_src | phy->allocated_dst) == |
1849 | D40_ALLOC_FREE); |
1850 | unlock: |
	spin_unlock_irqrestore(&phy->lock, flags);
1852 | |
1853 | return is_free; |
1854 | } |
1855 | |
1856 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) |
1857 | { |
1858 | int dev_type = d40c->dma_cfg.dev_type; |
1859 | int event_group; |
1860 | int event_line; |
1861 | struct d40_phy_res *phys; |
1862 | int i; |
1863 | int j; |
1864 | int log_num; |
1865 | int num_phy_chans; |
1866 | bool is_src; |
1867 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; |
1868 | |
1869 | phys = d40c->base->phy_res; |
1870 | num_phy_chans = d40c->base->num_phy_chans; |
1871 | |
1872 | if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { |
1873 | log_num = 2 * dev_type; |
1874 | is_src = true; |
1875 | } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || |
1876 | d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { |
1877 | /* dst event lines are used for logical memcpy */ |
1878 | log_num = 2 * dev_type + 1; |
1879 | is_src = false; |
1880 | } else |
1881 | return -EINVAL; |
1882 | |
1883 | event_group = D40_TYPE_TO_GROUP(dev_type); |
1884 | event_line = D40_TYPE_TO_EVENT(dev_type); |
1885 | |
1886 | if (!is_log) { |
1887 | if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { |
1888 | /* Find physical half channel */ |
1889 | if (d40c->dma_cfg.use_fixed_channel) { |
1890 | i = d40c->dma_cfg.phy_channel; |
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
1894 | goto found_phy; |
1895 | } else { |
1896 | for (i = 0; i < num_phy_chans; i++) { |
					if (d40_alloc_mask_set(&phys[i], is_src,
							       0, is_log,
							       first_phy_user))
1900 | goto found_phy; |
1901 | } |
1902 | } |
1903 | } else |
1904 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1905 | int phy_num = j + event_group * 2; |
1906 | for (i = phy_num; i < phy_num + 2; i++) { |
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
1912 | goto found_phy; |
1913 | } |
1914 | } |
1915 | return -EINVAL; |
1916 | found_phy: |
1917 | d40c->phy_chan = &phys[i]; |
1918 | d40c->log_num = D40_PHY_CHAN; |
1919 | goto out; |
1920 | } |
1921 | if (dev_type == -1) |
1922 | return -EINVAL; |
1923 | |
1924 | /* Find logical channel */ |
1925 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1926 | int phy_num = j + event_group * 2; |
1927 | |
1928 | if (d40c->dma_cfg.use_fixed_channel) { |
1929 | i = d40c->dma_cfg.phy_channel; |
1930 | |
1931 | if ((i != phy_num) && (i != phy_num + 1)) { |
1932 | dev_err(chan2dev(d40c), |
1933 | "invalid fixed phy channel %d\n" , i); |
1934 | return -EINVAL; |
1935 | } |
1936 | |
			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
1939 | goto found_log; |
1940 | |
1941 | dev_err(chan2dev(d40c), |
1942 | "could not allocate fixed phy channel %d\n" , i); |
1943 | return -EINVAL; |
1944 | } |
1945 | |
1946 | /* |
1947 | * Spread logical channels across all available physical rather |
1948 | * than pack every logical channel at the first available phy |
1949 | * channels. |
1950 | */ |
1951 | if (is_src) { |
1952 | for (i = phy_num; i < phy_num + 2; i++) { |
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
1956 | goto found_log; |
1957 | } |
1958 | } else { |
1959 | for (i = phy_num + 1; i >= phy_num; i--) { |
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
1963 | goto found_log; |
1964 | } |
1965 | } |
1966 | } |
1967 | return -EINVAL; |
1968 | |
1969 | found_log: |
1970 | d40c->phy_chan = &phys[i]; |
1971 | d40c->log_num = log_num; |
1972 | out: |
1973 | |
1974 | if (is_log) |
1975 | d40c->base->lookup_log_chans[d40c->log_num] = d40c; |
1976 | else |
1977 | d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; |
1978 | |
1979 | return 0; |
1980 | |
1981 | } |
1982 | |
1983 | static int d40_config_memcpy(struct d40_chan *d40c) |
1984 | { |
1985 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; |
1986 | |
1987 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { |
1988 | d40c->dma_cfg = dma40_memcpy_conf_log; |
1989 | d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; |
1990 | |
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1993 | |
1994 | } else if (dma_has_cap(DMA_MEMCPY, cap) && |
1995 | dma_has_cap(DMA_SLAVE, cap)) { |
1996 | d40c->dma_cfg = dma40_memcpy_conf_phy; |
1997 | |
1998 | /* Generate interrupt at end of transfer or relink. */ |
1999 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); |
2000 | |
2001 | /* Generate interrupt on error. */ |
2002 | d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); |
2003 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); |
2004 | |
2005 | } else { |
		chan_err(d40c, "No memcpy\n");
2007 | return -EINVAL; |
2008 | } |
2009 | |
2010 | return 0; |
2011 | } |
2012 | |
2013 | static int d40_free_dma(struct d40_chan *d40c) |
2014 | { |
2015 | |
2016 | int res = 0; |
2017 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); |
2018 | struct d40_phy_res *phy = d40c->phy_chan; |
2019 | bool is_src; |
2020 | |
2021 | /* Terminate all queued and active transfers */ |
2022 | d40_term_all(d40c); |
2023 | |
2024 | if (phy == NULL) { |
		chan_err(d40c, "phy == null\n");
2026 | return -EINVAL; |
2027 | } |
2028 | |
2029 | if (phy->allocated_src == D40_ALLOC_FREE && |
2030 | phy->allocated_dst == D40_ALLOC_FREE) { |
		chan_err(d40c, "channel already free\n");
2032 | return -EINVAL; |
2033 | } |
2034 | |
2035 | if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || |
2036 | d40c->dma_cfg.dir == DMA_MEM_TO_MEM) |
2037 | is_src = false; |
2038 | else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) |
2039 | is_src = true; |
2040 | else { |
		chan_err(d40c, "Unknown direction\n");
2042 | return -EINVAL; |
2043 | } |
2044 | |
	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2047 | if (res) { |
		chan_err(d40c, "stop failed\n");
2049 | goto mark_last_busy; |
2050 | } |
2051 | |
	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
2055 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; |
2056 | else |
2057 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
2058 | |
2059 | if (d40c->busy) { |
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
2062 | } |
2063 | |
2064 | d40c->busy = false; |
2065 | d40c->phy_chan = NULL; |
2066 | d40c->configured = false; |
2067 | mark_last_busy: |
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
2070 | return res; |
2071 | } |
2072 | |
2073 | static bool d40_is_paused(struct d40_chan *d40c) |
2074 | { |
	void __iomem *chanbase = chan_base(d40c);
2076 | bool is_paused = false; |
2077 | unsigned long flags; |
2078 | void __iomem *active_reg; |
2079 | u32 status; |
2080 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); |
2081 | |
2082 | spin_lock_irqsave(&d40c->lock, flags); |
2083 | |
	if (chan_is_physical(d40c)) {
2085 | if (d40c->phy_chan->num % 2 == 0) |
2086 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
2087 | else |
2088 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; |
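		/*
		 * The ACTIVE and ACTIVO registers hold the 2-bit state
		 * fields of the even respectively odd physical channels,
		 * so channel N's field sits at bit 2 * (N / 2) of its
		 * parity register (see D40_CHAN_POS()).
		 */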
2089 | |
		status = (readl(active_reg) &
2091 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> |
2092 | D40_CHAN_POS(d40c->phy_chan->num); |
2093 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) |
2094 | is_paused = true; |
2095 | goto unlock; |
2096 | } |
2097 | |
2098 | if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || |
2099 | d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { |
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
2105 | goto unlock; |
2106 | } |
2107 | |
2108 | status = (status & D40_EVENTLINE_MASK(event)) >> |
2109 | D40_EVENTLINE_POS(event); |
2110 | |
2111 | if (status != D40_DMA_RUN) |
2112 | is_paused = true; |
2113 | unlock: |
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
2118 | |
2119 | static u32 stedma40_residue(struct dma_chan *chan) |
2120 | { |
2121 | struct d40_chan *d40c = |
2122 | container_of(chan, struct d40_chan, chan); |
2123 | u32 bytes_left; |
2124 | unsigned long flags; |
2125 | |
2126 | spin_lock_irqsave(&d40c->lock, flags); |
2127 | bytes_left = d40_residue(d40c); |
	spin_unlock_irqrestore(&d40c->lock, flags);
2129 | |
2130 | return bytes_left; |
2131 | } |
2132 | |
2133 | static int |
2134 | d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, |
2135 | struct scatterlist *sg_src, struct scatterlist *sg_dst, |
2136 | unsigned int sg_len, dma_addr_t src_dev_addr, |
2137 | dma_addr_t dst_dev_addr) |
2138 | { |
2139 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
2140 | struct stedma40_half_channel_info *src_info = &cfg->src_info; |
2141 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; |
2142 | int ret; |
2143 | |
	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);
	if (ret < 0)
		return ret;

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);
2157 | |
2158 | return ret < 0 ? ret : 0; |
2159 | } |
2160 | |
2161 | static int |
2162 | d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, |
2163 | struct scatterlist *sg_src, struct scatterlist *sg_dst, |
2164 | unsigned int sg_len, dma_addr_t src_dev_addr, |
2165 | dma_addr_t dst_dev_addr) |
2166 | { |
2167 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
2168 | struct stedma40_half_channel_info *src_info = &cfg->src_info; |
2169 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; |
2170 | unsigned long flags = 0; |
2171 | int ret; |
2172 | |
2173 | if (desc->cyclic) |
2174 | flags |= LLI_CYCLIC | LLI_TERM_INT; |
2175 | |
	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);
	if (ret < 0)
		return ret;

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);
2187 | |
	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);
2190 | |
2191 | return ret < 0 ? ret : 0; |
2192 | } |
2193 | |
2194 | static struct d40_desc * |
2195 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, |
2196 | unsigned int sg_len, unsigned long dma_flags) |
2197 | { |
2198 | struct stedma40_chan_cfg *cfg; |
2199 | struct d40_desc *desc; |
2200 | int ret; |
2201 | |
	desc = d40_desc_get(chan);
2203 | if (!desc) |
2204 | return NULL; |
2205 | |
2206 | cfg = &chan->dma_cfg; |
	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
2209 | if (desc->lli_len < 0) { |
		chan_err(chan, "Unaligned size\n");
2211 | goto free_desc; |
2212 | } |
2213 | |
	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2215 | if (ret < 0) { |
		chan_err(chan, "Could not allocate lli\n");
2217 | goto free_desc; |
2218 | } |
2219 | |
2220 | desc->lli_current = 0; |
2221 | desc->txd.flags = dma_flags; |
2222 | desc->txd.tx_submit = d40_tx_submit; |
2223 | |
	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2225 | |
2226 | return desc; |
2227 | free_desc: |
	d40_desc_free(chan, desc);
2229 | return NULL; |
2230 | } |
2231 | |
2232 | static struct dma_async_tx_descriptor * |
2233 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
2234 | struct scatterlist *sg_dst, unsigned int sg_len, |
2235 | enum dma_transfer_direction direction, unsigned long dma_flags) |
2236 | { |
2237 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); |
2238 | dma_addr_t src_dev_addr; |
2239 | dma_addr_t dst_dev_addr; |
2240 | struct d40_desc *desc; |
2241 | unsigned long flags; |
2242 | int ret; |
2243 | |
2244 | if (!chan->phy_chan) { |
		chan_err(chan, "Cannot prepare unallocated channel\n");
2246 | return NULL; |
2247 | } |
2248 | |
	d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
2250 | |
2251 | spin_lock_irqsave(&chan->lock, flags); |
2252 | |
	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2254 | if (desc == NULL) |
2255 | goto unlock; |
2256 | |
2257 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) |
2258 | desc->cyclic = true; |
2259 | |
2260 | src_dev_addr = 0; |
2261 | dst_dev_addr = 0; |
2262 | if (direction == DMA_DEV_TO_MEM) |
2263 | src_dev_addr = chan->runtime_addr; |
2264 | else if (direction == DMA_MEM_TO_DEV) |
2265 | dst_dev_addr = chan->runtime_addr; |
2266 | |
2267 | if (chan_is_logical(chan)) |
2268 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, |
2269 | sg_len, src_dev_addr, dst_dev_addr); |
2270 | else |
2271 | ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, |
2272 | sg_len, src_dev_addr, dst_dev_addr); |
2273 | |
2274 | if (ret) { |
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
2277 | goto free_desc; |
2278 | } |
2279 | |
2280 | /* |
2281 | * add descriptor to the prepare queue in order to be able |
2282 | * to free them later in terminate_all |
2283 | */ |
2284 | list_add_tail(new: &desc->node, head: &chan->prepare_queue); |
2285 | |
	spin_unlock_irqrestore(&chan->lock, flags);
2287 | |
2288 | return &desc->txd; |
2289 | free_desc: |
	d40_desc_free(chan, desc);
2291 | unlock: |
	spin_unlock_irqrestore(&chan->lock, flags);
2293 | return NULL; |
2294 | } |
2295 | |
2296 | static bool stedma40_filter(struct dma_chan *chan, void *data) |
2297 | { |
2298 | struct stedma40_chan_cfg *info = data; |
2299 | struct d40_chan *d40c = |
2300 | container_of(chan, struct d40_chan, chan); |
2301 | int err; |
2302 | |
2303 | if (data) { |
		err = d40_validate_conf(d40c, info);
2305 | if (!err) |
2306 | d40c->dma_cfg = *info; |
2307 | } else |
2308 | err = d40_config_memcpy(d40c); |
2309 | |
2310 | if (!err) |
2311 | d40c->configured = true; |
2312 | |
2313 | return err == 0; |
2314 | } |
2315 | |
2316 | static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) |
2317 | { |
2318 | bool realtime = d40c->dma_cfg.realtime; |
2319 | bool highprio = d40c->dma_cfg.high_priority; |
2320 | u32 rtreg; |
2321 | u32 event = D40_TYPE_TO_EVENT(dev_type); |
2322 | u32 group = D40_TYPE_TO_GROUP(dev_type); |
2323 | u32 bit = BIT(event); |
2324 | u32 prioreg; |
2325 | struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; |
2326 | |
2327 | rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; |
2328 | /* |
2329 | * Due to a hardware bug, in some cases a logical channel triggered by |
2330 | * a high priority destination event line can generate extra packet |
2331 | * transactions. |
2332 | * |
2333 | * The workaround is to not set the high priority level for the |
2334 | * destination event lines that trigger logical channels. |
2335 | */ |
	if (!src && chan_is_logical(d40c))
2337 | highprio = false; |
2338 | |
2339 | prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear; |
2340 | |
2341 | /* Destination event lines are stored in the upper halfword */ |
2342 | if (!src) |
2343 | bit <<= 16; |
2344 | |
	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
2347 | } |
2348 | |
2349 | static void d40_set_prio_realtime(struct d40_chan *d40c) |
2350 | { |
2351 | if (d40c->base->rev < 3) |
2352 | return; |
2353 | |
2354 | if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || |
2355 | (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) |
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2357 | |
2358 | if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || |
2359 | (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) |
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2361 | } |
2362 | |
2363 | #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1) |
2364 | #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) |
2365 | #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) |
2366 | #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) |
2367 | #define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1) |
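/*
 * A DT client thus passes three cells: the event line (device type), the
 * fixed physical channel (used only when bit 3 of the flags is set) and
 * the flags word itself. For example, a hypothetical client requesting a
 * logical, memory-to-device, high priority channel would use something
 * like "dmas = <&dma 13 0 0x10>;" (illustrative values, not taken from a
 * real board file).
 */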
2368 | |
2369 | static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, |
2370 | struct of_dma *ofdma) |
2371 | { |
2372 | struct stedma40_chan_cfg cfg; |
2373 | dma_cap_mask_t cap; |
2374 | u32 flags; |
2375 | |
2376 | memset(&cfg, 0, sizeof(struct stedma40_chan_cfg)); |
2377 | |
2378 | dma_cap_zero(cap); |
2379 | dma_cap_set(DMA_SLAVE, cap); |
2380 | |
2381 | cfg.dev_type = dma_spec->args[0]; |
2382 | flags = dma_spec->args[2]; |
2383 | |
2384 | switch (D40_DT_FLAGS_MODE(flags)) { |
2385 | case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break; |
2386 | case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break; |
2387 | } |
2388 | |
2389 | switch (D40_DT_FLAGS_DIR(flags)) { |
2390 | case 0: |
2391 | cfg.dir = DMA_MEM_TO_DEV; |
2392 | cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); |
2393 | break; |
2394 | case 1: |
2395 | cfg.dir = DMA_DEV_TO_MEM; |
2396 | cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); |
2397 | break; |
2398 | } |
2399 | |
2400 | if (D40_DT_FLAGS_FIXED_CHAN(flags)) { |
2401 | cfg.phy_channel = dma_spec->args[1]; |
2402 | cfg.use_fixed_channel = true; |
2403 | } |
2404 | |
2405 | if (D40_DT_FLAGS_HIGH_PRIO(flags)) |
2406 | cfg.high_priority = true; |
2407 | |
2408 | return dma_request_channel(cap, stedma40_filter, &cfg); |
2409 | } |
2410 | |
2411 | /* DMA ENGINE functions */ |
2412 | static int d40_alloc_chan_resources(struct dma_chan *chan) |
2413 | { |
2414 | int err; |
2415 | unsigned long flags; |
2416 | struct d40_chan *d40c = |
2417 | container_of(chan, struct d40_chan, chan); |
2418 | bool is_free_phy; |
2419 | spin_lock_irqsave(&d40c->lock, flags); |
2420 | |
2421 | dma_cookie_init(chan); |
2422 | |
2423 | /* If no dma configuration is set use default configuration (memcpy) */ |
2424 | if (!d40c->configured) { |
2425 | err = d40_config_memcpy(d40c); |
2426 | if (err) { |
			chan_err(d40c, "Failed to configure memcpy channel\n");
2428 | goto mark_last_busy; |
2429 | } |
2430 | } |
2431 | |
	err = d40_allocate_channel(d40c, &is_free_phy);
2433 | if (err) { |
		chan_err(d40c, "Failed to allocate channel\n");
2435 | d40c->configured = false; |
2436 | goto mark_last_busy; |
2437 | } |
2438 | |
	pm_runtime_get_sync(d40c->base->dev);
2440 | |
2441 | d40_set_prio_realtime(d40c); |
2442 | |
	if (chan_is_logical(d40c)) {
2444 | if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) |
2445 | d40c->lcpa = d40c->base->lcpa_base + |
2446 | d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; |
2447 | else |
2448 | d40c->lcpa = d40c->base->lcpa_base + |
2449 | d40c->dma_cfg.dev_type * |
2450 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
2451 | |
2452 | /* Unmask the Global Interrupt Mask. */ |
2453 | d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); |
2454 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); |
2455 | } |
2456 | |
	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
		chan_is_logical(d40c) ? "logical" : "physical",
		d40c->phy_chan->num,
		d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");

2463 | /* |
2464 | * Only write channel configuration to the DMA if the physical |
2465 | * resource is free. In case of multiple logical channels |
2466 | * on the same physical resource, only the first write is necessary. |
2467 | */ |
2468 | if (is_free_phy) |
2469 | d40_config_write(d40c); |
2470 | mark_last_busy: |
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
2474 | return err; |
2475 | } |
2476 | |
2477 | static void d40_free_chan_resources(struct dma_chan *chan) |
2478 | { |
2479 | struct d40_chan *d40c = |
2480 | container_of(chan, struct d40_chan, chan); |
2481 | int err; |
2482 | unsigned long flags; |
2483 | |
2484 | if (d40c->phy_chan == NULL) { |
		chan_err(d40c, "Cannot free unallocated channel\n");
2486 | return; |
2487 | } |
2488 | |
2489 | spin_lock_irqsave(&d40c->lock, flags); |
2490 | |
2491 | err = d40_free_dma(d40c); |
2492 | |
2493 | if (err) |
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
2496 | } |
2497 | |
2498 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, |
2499 | dma_addr_t dst, |
2500 | dma_addr_t src, |
2501 | size_t size, |
2502 | unsigned long dma_flags) |
2503 | { |
2504 | struct scatterlist dst_sg; |
2505 | struct scatterlist src_sg; |
2506 | |
2507 | sg_init_table(&dst_sg, 1); |
2508 | sg_init_table(&src_sg, 1); |
2509 | |
2510 | sg_dma_address(&dst_sg) = dst; |
2511 | sg_dma_address(&src_sg) = src; |
2512 | |
2513 | sg_dma_len(&dst_sg) = size; |
2514 | sg_dma_len(&src_sg) = size; |
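	/*
	 * Wrapping the flat source and destination addresses in one-entry
	 * scatterlists lets memcpy reuse the common d40_prep_sg() path
	 * below.
	 */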
2515 | |
	return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
			   DMA_MEM_TO_MEM, dma_flags);
2518 | } |
2519 | |
2520 | static struct dma_async_tx_descriptor * |
2521 | d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
2522 | unsigned int sg_len, enum dma_transfer_direction direction, |
2523 | unsigned long dma_flags, void *context) |
2524 | { |
2525 | if (!is_slave_direction(direction)) |
2526 | return NULL; |
2527 | |
	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2529 | } |
2530 | |
2531 | static struct dma_async_tx_descriptor * |
2532 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2533 | size_t buf_len, size_t period_len, |
2534 | enum dma_transfer_direction direction, unsigned long flags) |
2535 | { |
2536 | unsigned int periods = buf_len / period_len; |
2537 | struct dma_async_tx_descriptor *txd; |
2538 | struct scatterlist *sg; |
2539 | int i; |
2540 | |
	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2542 | if (!sg) |
2543 | return NULL; |
2544 | |
2545 | for (i = 0; i < periods; i++) { |
2546 | sg_dma_address(&sg[i]) = dma_addr; |
2547 | sg_dma_len(&sg[i]) = period_len; |
2548 | dma_addr += period_len; |
2549 | } |
2550 | |
	sg_chain(sg, periods + 1, sg);
2552 | |
	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);
2555 | |
	kfree(sg);
2557 | |
2558 | return txd; |
2559 | } |
2560 | |
2561 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
2562 | dma_cookie_t cookie, |
2563 | struct dma_tx_state *txstate) |
2564 | { |
2565 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2566 | enum dma_status ret; |
2567 | |
2568 | if (d40c->phy_chan == NULL) { |
		chan_err(d40c, "Cannot read status of unallocated channel\n");
2570 | return -EINVAL; |
2571 | } |
2572 | |
	ret = dma_cookie_status(chan, cookie, txstate);
2574 | if (ret != DMA_COMPLETE && txstate) |
		dma_set_residue(txstate, stedma40_residue(chan));
2576 | |
2577 | if (d40_is_paused(d40c)) |
2578 | ret = DMA_PAUSED; |
2579 | |
2580 | return ret; |
2581 | } |
2582 | |
2583 | static void d40_issue_pending(struct dma_chan *chan) |
2584 | { |
2585 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2586 | unsigned long flags; |
2587 | |
2588 | if (d40c->phy_chan == NULL) { |
		chan_err(d40c, "Channel is not allocated!\n");
2590 | return; |
2591 | } |
2592 | |
2593 | spin_lock_irqsave(&d40c->lock, flags); |
2594 | |
	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2596 | |
2597 | /* Busy means that queued jobs are already being processed */ |
2598 | if (!d40c->busy) |
2599 | (void) d40_queue_start(d40c); |
2600 | |
	spin_unlock_irqrestore(&d40c->lock, flags);
2602 | } |
2603 | |
2604 | static int d40_terminate_all(struct dma_chan *chan) |
2605 | { |
2606 | unsigned long flags; |
2607 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2608 | int ret; |
2609 | |
2610 | if (d40c->phy_chan == NULL) { |
		chan_err(d40c, "Channel is not allocated!\n");
2612 | return -EINVAL; |
2613 | } |
2614 | |
2615 | spin_lock_irqsave(&d40c->lock, flags); |
2616 | |
	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2619 | if (ret) |
		chan_err(d40c, "Failed to stop channel\n");
2621 | |
2622 | d40_term_all(d40c); |
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
2628 | } |
2629 | d40c->busy = false; |
2630 | |
	spin_unlock_irqrestore(&d40c->lock, flags);
2632 | return 0; |
2633 | } |
2634 | |
2635 | static int |
2636 | dma40_config_to_halfchannel(struct d40_chan *d40c, |
2637 | struct stedma40_half_channel_info *info, |
2638 | u32 maxburst) |
2639 | { |
2640 | int psize; |
2641 | |
	if (chan_is_logical(d40c)) {
2643 | if (maxburst >= 16) |
2644 | psize = STEDMA40_PSIZE_LOG_16; |
2645 | else if (maxburst >= 8) |
2646 | psize = STEDMA40_PSIZE_LOG_8; |
2647 | else if (maxburst >= 4) |
2648 | psize = STEDMA40_PSIZE_LOG_4; |
2649 | else |
2650 | psize = STEDMA40_PSIZE_LOG_1; |
2651 | } else { |
2652 | if (maxburst >= 16) |
2653 | psize = STEDMA40_PSIZE_PHY_16; |
2654 | else if (maxburst >= 8) |
2655 | psize = STEDMA40_PSIZE_PHY_8; |
2656 | else if (maxburst >= 4) |
2657 | psize = STEDMA40_PSIZE_PHY_4; |
2658 | else |
2659 | psize = STEDMA40_PSIZE_PHY_1; |
2660 | } |
2661 | |
2662 | info->psize = psize; |
2663 | info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; |
2664 | |
2665 | return 0; |
2666 | } |
2667 | |
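/*
 * The dmaengine core calls device_config ahead of the prep calls; the
 * configuration is only stored here and is applied per transfer in
 * d40_set_runtime_config_write(), invoked from d40_prep_sg() once the
 * transfer direction is known.
 */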
2668 | static int d40_set_runtime_config(struct dma_chan *chan, |
2669 | struct dma_slave_config *config) |
2670 | { |
2671 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2672 | |
2673 | memcpy(&d40c->slave_config, config, sizeof(*config)); |
2674 | |
2675 | return 0; |
2676 | } |
2677 | |
2678 | /* Runtime reconfiguration extension */ |
2679 | static int d40_set_runtime_config_write(struct dma_chan *chan, |
2680 | struct dma_slave_config *config, |
2681 | enum dma_transfer_direction direction) |
2682 | { |
2683 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2684 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; |
2685 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
2686 | dma_addr_t config_addr; |
2687 | u32 src_maxburst, dst_maxburst; |
2688 | int ret; |
2689 | |
2690 | if (d40c->phy_chan == NULL) { |
		chan_err(d40c, "Channel is not allocated!\n");
2692 | return -EINVAL; |
2693 | } |
2694 | |
2695 | src_addr_width = config->src_addr_width; |
2696 | src_maxburst = config->src_maxburst; |
2697 | dst_addr_width = config->dst_addr_width; |
2698 | dst_maxburst = config->dst_maxburst; |
2699 | |
2700 | if (direction == DMA_DEV_TO_MEM) { |
2701 | config_addr = config->src_addr; |
2702 | |
2703 | if (cfg->dir != DMA_DEV_TO_MEM) |
2704 | dev_dbg(d40c->base->dev, |
2705 | "channel was not configured for peripheral " |
2706 | "to memory transfer (%d) overriding\n" , |
2707 | cfg->dir); |
2708 | cfg->dir = DMA_DEV_TO_MEM; |
2709 | |
2710 | /* Configure the memory side */ |
2711 | if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
2712 | dst_addr_width = src_addr_width; |
2713 | if (dst_maxburst == 0) |
2714 | dst_maxburst = src_maxburst; |
2715 | |
2716 | } else if (direction == DMA_MEM_TO_DEV) { |
2717 | config_addr = config->dst_addr; |
2718 | |
2719 | if (cfg->dir != DMA_MEM_TO_DEV) |
2720 | dev_dbg(d40c->base->dev, |
2721 | "channel was not configured for memory " |
2722 | "to peripheral transfer (%d) overriding\n" , |
2723 | cfg->dir); |
2724 | cfg->dir = DMA_MEM_TO_DEV; |
2725 | |
2726 | /* Configure the memory side */ |
2727 | if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
2728 | src_addr_width = dst_addr_width; |
2729 | if (src_maxburst == 0) |
2730 | src_maxburst = dst_maxburst; |
2731 | } else { |
2732 | dev_err(d40c->base->dev, |
2733 | "unrecognized channel direction %d\n" , |
2734 | direction); |
2735 | return -EINVAL; |
2736 | } |
2737 | |
2738 | if (config_addr <= 0) { |
		dev_err(d40c->base->dev, "no address supplied\n");
2740 | return -EINVAL; |
2741 | } |
2742 | |
2743 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { |
2744 | dev_err(d40c->base->dev, |
2745 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n" , |
2746 | src_maxburst, |
2747 | src_addr_width, |
2748 | dst_maxburst, |
2749 | dst_addr_width); |
2750 | return -EINVAL; |
2751 | } |
2752 | |
2753 | if (src_maxburst > 16) { |
2754 | src_maxburst = 16; |
2755 | dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; |
2756 | } else if (dst_maxburst > 16) { |
2757 | dst_maxburst = 16; |
2758 | src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; |
2759 | } |
2760 | |
	/* The only valid widths are 1, 2, 4 and 8. */
2762 | if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || |
2763 | src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || |
2764 | dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || |
2765 | dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || |
	    !is_power_of_2(src_addr_width) ||
	    !is_power_of_2(dst_addr_width))
2768 | return -EINVAL; |
2769 | |
2770 | cfg->src_info.data_width = src_addr_width; |
2771 | cfg->dst_info.data_width = dst_addr_width; |
2772 | |
	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_maxburst);
2775 | if (ret) |
2776 | return ret; |
2777 | |
	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_maxburst);
2780 | if (ret) |
2781 | return ret; |
2782 | |
2783 | /* Fill in register values */ |
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2788 | |
2789 | /* These settings will take precedence later */ |
2790 | d40c->runtime_addr = config_addr; |
2791 | d40c->runtime_direction = direction; |
2792 | dev_dbg(d40c->base->dev, |
2793 | "configured channel %s for %s, data width %d/%d, " |
2794 | "maxburst %d/%d elements, LE, no flow control\n" , |
2795 | dma_chan_name(chan), |
2796 | (direction == DMA_DEV_TO_MEM) ? "RX" : "TX" , |
2797 | src_addr_width, dst_addr_width, |
2798 | src_maxburst, dst_maxburst); |
2799 | |
2800 | return 0; |
2801 | } |
2802 | |
2803 | /* Initialization functions */ |
2804 | |
2805 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, |
2806 | struct d40_chan *chans, int offset, |
2807 | int num_chans) |
2808 | { |
2809 | int i = 0; |
2810 | struct d40_chan *d40c; |
2811 | |
	INIT_LIST_HEAD(&dma->channels);
2813 | |
2814 | for (i = offset; i < offset + num_chans; i++) { |
2815 | d40c = &chans[i]; |
2816 | d40c->base = base; |
2817 | d40c->chan.device = dma; |
2818 | |
2819 | spin_lock_init(&d40c->lock); |
2820 | |
2821 | d40c->log_num = D40_PHY_CHAN; |
2822 | |
		INIT_LIST_HEAD(&d40c->done);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);
		INIT_LIST_HEAD(&d40c->prepare_queue);
2829 | |
		tasklet_setup(&d40c->tasklet, dma_tasklet);
2831 | |
		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
2834 | } |
2835 | } |
2836 | |
2837 | static void d40_ops_init(struct d40_base *base, struct dma_device *dev) |
2838 | { |
2839 | if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) { |
2840 | dev->device_prep_slave_sg = d40_prep_slave_sg; |
2841 | dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
2842 | } |
2843 | |
2844 | if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { |
2845 | dev->device_prep_dma_memcpy = d40_prep_memcpy; |
2846 | dev->directions = BIT(DMA_MEM_TO_MEM); |
2847 | /* |
2848 | * This controller can only access address at even |
2849 | * 32bit boundaries, i.e. 2^2 |
2850 | */ |
2851 | dev->copy_align = DMAENGINE_ALIGN_4_BYTES; |
2852 | } |
2853 | |
2854 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) |
2855 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; |
2856 | |
2857 | dev->device_alloc_chan_resources = d40_alloc_chan_resources; |
2858 | dev->device_free_chan_resources = d40_free_chan_resources; |
2859 | dev->device_issue_pending = d40_issue_pending; |
2860 | dev->device_tx_status = d40_tx_status; |
2861 | dev->device_config = d40_set_runtime_config; |
2862 | dev->device_pause = d40_pause; |
2863 | dev->device_resume = d40_resume; |
2864 | dev->device_terminate_all = d40_terminate_all; |
2865 | dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
2866 | dev->dev = base->dev; |
2867 | } |
2868 | |
2869 | static int __init d40_dmaengine_init(struct d40_base *base, |
2870 | int num_reserved_chans) |
2871 | { |
2872 | int err ; |
2873 | |
	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);
2876 | |
2877 | dma_cap_zero(base->dma_slave.cap_mask); |
2878 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); |
2879 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); |
2880 | |
	d40_ops_init(base, &base->dma_slave);
2882 | |
	err = dmaenginem_async_device_register(&base->dma_slave);
2884 | |
2885 | if (err) { |
		d40_err(base->dev, "Failed to register slave channels\n");
2887 | goto exit; |
2888 | } |
2889 | |
	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->num_memcpy_chans);
2892 | |
2893 | dma_cap_zero(base->dma_memcpy.cap_mask); |
2894 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
2895 | |
	d40_ops_init(base, &base->dma_memcpy);
2897 | |
	err = dmaenginem_async_device_register(&base->dma_memcpy);
2899 | |
2900 | if (err) { |
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
2903 | goto exit; |
2904 | } |
2905 | |
	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);
2908 | |
2909 | dma_cap_zero(base->dma_both.cap_mask); |
2910 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); |
2911 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); |
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2913 | |
	d40_ops_init(base, &base->dma_both);
	err = dmaenginem_async_device_register(&base->dma_both);
2916 | |
2917 | if (err) { |
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
2920 | goto exit; |
2921 | } |
2922 | return 0; |
2923 | exit: |
2924 | return err; |
2925 | } |
2926 | |
2927 | /* Suspend resume functionality */ |
2928 | #ifdef CONFIG_PM_SLEEP |
2929 | static int dma40_suspend(struct device *dev) |
2930 | { |
2931 | struct d40_base *base = dev_get_drvdata(dev); |
2932 | int ret; |
2933 | |
2934 | ret = pm_runtime_force_suspend(dev); |
2935 | if (ret) |
2936 | return ret; |
2937 | |
2938 | if (base->lcpa_regulator) |
		ret = regulator_disable(base->lcpa_regulator);
2940 | return ret; |
2941 | } |
2942 | |
2943 | static int dma40_resume(struct device *dev) |
2944 | { |
2945 | struct d40_base *base = dev_get_drvdata(dev); |
2946 | int ret = 0; |
2947 | |
2948 | if (base->lcpa_regulator) { |
		ret = regulator_enable(base->lcpa_regulator);
2950 | if (ret) |
2951 | return ret; |
2952 | } |
2953 | |
2954 | return pm_runtime_force_resume(dev); |
2955 | } |
2956 | #endif |
2957 | |
2958 | #ifdef CONFIG_PM |
2959 | static void dma40_backup(void __iomem *baseaddr, u32 *backup, |
2960 | u32 *regaddr, int num, bool save) |
2961 | { |
2962 | int i; |
2963 | |
2964 | for (i = 0; i < num; i++) { |
2965 | void __iomem *addr = baseaddr + regaddr[i]; |
2966 | |
2967 | if (save) |
2968 | backup[i] = readl_relaxed(addr); |
2969 | else |
2970 | writel_relaxed(backup[i], addr); |
2971 | } |
2972 | } |
2973 | |
2974 | static void d40_save_restore_registers(struct d40_base *base, bool save) |
2975 | { |
2976 | int i; |
2977 | |
2978 | /* Save/Restore channel specific registers */ |
2979 | for (i = 0; i < base->num_phy_chans; i++) { |
2980 | void __iomem *addr; |
2981 | int idx; |
2982 | |
2983 | if (base->phy_res[i].reserved) |
2984 | continue; |
2985 | |
2986 | addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; |
2987 | idx = i * ARRAY_SIZE(d40_backup_regs_chan); |
2988 | |
		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
2993 | } |
2994 | |
2995 | /* Save/Restore global registers */ |
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);
2999 | |
3000 | /* Save/Restore registers only existing on dma40 v3 and later */ |
3001 | if (base->gen_dmac.backup) |
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
3006 | } |
3007 | |
3008 | static int dma40_runtime_suspend(struct device *dev) |
3009 | { |
3010 | struct d40_base *base = dev_get_drvdata(dev); |
3011 | |
	d40_save_restore_registers(base, true);
3013 | |
3014 | /* Don't disable/enable clocks for v1 due to HW bugs */ |
3015 | if (base->rev != 1) |
3016 | writel_relaxed(base->gcc_pwr_off_mask, |
3017 | base->virtbase + D40_DREG_GCC); |
3018 | |
3019 | return 0; |
3020 | } |
3021 | |
3022 | static int dma40_runtime_resume(struct device *dev) |
3023 | { |
3024 | struct d40_base *base = dev_get_drvdata(dev); |
3025 | |
	d40_save_restore_registers(base, false);
3027 | |
3028 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, |
3029 | base->virtbase + D40_DREG_GCC); |
3030 | return 0; |
3031 | } |
3032 | #endif |
3033 | |
3034 | static const struct dev_pm_ops dma40_pm_ops = { |
3035 | SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) |
3036 | SET_RUNTIME_PM_OPS(dma40_runtime_suspend, |
3037 | dma40_runtime_resume, |
3038 | NULL) |
3039 | }; |
3040 | |
3041 | /* Initialization functions. */ |
3042 | |
3043 | static int __init d40_phy_res_init(struct d40_base *base) |
3044 | { |
3045 | int i; |
3046 | int num_phy_chans_avail = 0; |
3047 | u32 val[2]; |
3048 | int odd_even_bit = -2; |
3049 | int gcc = D40_DREG_GCC_ENA; |
3050 | |
	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
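	/*
	 * PRSME and PRSMO hold one 2-bit mode field per even respectively
	 * odd physical channel; a field value of 1 marks the channel as
	 * available to secure mode only.
	 */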
3053 | |
3054 | for (i = 0; i < base->num_phy_chans; i++) { |
3055 | base->phy_res[i].num = i; |
3056 | odd_even_bit += 2 * ((i % 2) == 0); |
3057 | if (((val[i % 2] >> odd_even_bit) & 3) == 1) { |
3058 | /* Mark security only channels as occupied */ |
3059 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; |
3060 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; |
3061 | base->phy_res[i].reserved = true; |
3062 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), |
3063 | D40_DREG_GCC_SRC); |
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_DST);
		} else {
3069 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; |
3070 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; |
3071 | base->phy_res[i].reserved = false; |
3072 | num_phy_chans_avail++; |
3073 | } |
3074 | spin_lock_init(&base->phy_res[i].lock); |
3075 | } |
3076 | |
3077 | /* Mark disabled channels as occupied */ |
3078 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { |
3079 | int chan = base->plat_data->disabled_channels[i]; |
3080 | |
3081 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; |
3082 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; |
3083 | base->phy_res[chan].reserved = true; |
3084 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), |
3085 | D40_DREG_GCC_SRC); |
3086 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), |
3087 | D40_DREG_GCC_DST); |
3088 | num_phy_chans_avail--; |
3089 | } |
3090 | |
3091 | /* Mark soft_lli channels */ |
3092 | for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { |
3093 | int chan = base->plat_data->soft_lli_chans[i]; |
3094 | |
3095 | base->phy_res[chan].use_soft_lli = true; |
3096 | } |
3097 | |
3098 | dev_info(base->dev, "%d of %d physical DMA channels available\n" , |
3099 | num_phy_chans_avail, base->num_phy_chans); |
3100 | |
3101 | /* Verify settings extended vs standard */ |
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3103 | |
3104 | for (i = 0; i < base->num_phy_chans; i++) { |
3105 | |
3106 | if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && |
3107 | (val[0] & 0x3) != 1) |
3108 | dev_info(base->dev, |
3109 | "[%s] INFO: channel %d is misconfigured (%d)\n" , |
3110 | __func__, i, val[0] & 0x3); |
3111 | |
3112 | val[0] = val[0] >> 2; |
3113 | } |
3114 | |
3115 | /* |
3116 | * To keep things simple, Enable all clocks initially. |
3117 | * The clocks will get managed later post channel allocation. |
3118 | * The clocks for the event lines on which reserved channels exists |
3119 | * are not managed here. |
3120 | */ |
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3122 | base->gcc_pwr_off_mask = gcc; |
3123 | |
3124 | return num_phy_chans_avail; |
3125 | } |
3126 | |
3127 | /* Called from the registered devm action */ |
3128 | static void d40_drop_kmem_cache_action(void *d) |
3129 | { |
3130 | struct kmem_cache *desc_slab = d; |
3131 | |
	kmem_cache_destroy(desc_slab);
3133 | } |
3134 | |
3135 | static int __init d40_hw_detect_init(struct platform_device *pdev, |
3136 | struct d40_base **retbase) |
3137 | { |
	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3139 | struct device *dev = &pdev->dev; |
3140 | struct clk *clk; |
3141 | void __iomem *virtbase; |
3142 | struct d40_base *base; |
3143 | int num_log_chans; |
3144 | int num_phy_chans; |
3145 | int num_memcpy_chans; |
3146 | int i; |
3147 | u32 pid; |
3148 | u32 cid; |
3149 | u8 rev; |
3150 | int ret; |
3151 | |
3152 | clk = devm_clk_get_enabled(dev, NULL); |
	if (IS_ERR(clk))
		return PTR_ERR(clk);
3155 | |
3156 | /* Get IO for DMAC base address */ |
	virtbase = devm_platform_ioremap_resource_byname(pdev, "base");
	if (IS_ERR(virtbase))
		return PTR_ERR(virtbase);
3160 | |
3161 | /* This is just a regular AMBA PrimeCell ID actually */ |
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + SZ_4K - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + SZ_4K - 0x10 + 4 * i)
			& 255) << (i * 8);
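	/*
	 * Each of the four peripheral ID and four component ID registers
	 * carries one byte of the 32-bit value in its low 8 bits, located
	 * at 0xFE0..0xFEC and 0xFF0..0xFFC from the start of the 4 kB
	 * register page, as per the standard PrimeCell layout.
	 */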
3168 | |
3169 | if (cid != AMBA_CID) { |
		d40_err(dev, "Unknown hardware! No PrimeCell ID\n");
3171 | return -EINVAL; |
3172 | } |
3173 | if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { |
		d40_err(dev, "Unknown designer! Got %x wanted %x\n",
3175 | AMBA_MANF_BITS(pid), |
3176 | AMBA_VENDOR_ST); |
3177 | return -EINVAL; |
3178 | } |
3179 | /* |
3180 | * HW revision: |
3181 | * DB8500ed has revision 0 |
3182 | * ? has revision 1 |
3183 | * DB8500v1 has revision 2 |
3184 | * DB8500v2 has revision 3 |
3185 | * AP9540v1 has revision 4 |
3186 | * DB8540v1 has revision 4 |
3187 | */ |
3188 | rev = AMBA_REV_BITS(pid); |
3189 | if (rev < 2) { |
		d40_err(dev, "hardware revision: %d is not supported", rev);
3191 | return -EINVAL; |
3192 | } |
3193 | |
3194 | /* The number of physical channels on this HW */ |
3195 | if (plat_data->num_of_phy_chans) |
3196 | num_phy_chans = plat_data->num_of_phy_chans; |
3197 | else |
		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
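	/* An ICFG field value of 1, for example, yields 4 * 1 + 4 = 8 channels. */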
3199 | |
3200 | /* The number of channels used for memcpy */ |
3201 | if (plat_data->num_of_memcpy_chans) |
3202 | num_memcpy_chans = plat_data->num_of_memcpy_chans; |
3203 | else |
3204 | num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels); |
3205 | |
3206 | num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; |
3207 | |
3208 | dev_info(dev, |
3209 | "hardware rev: %d with %d physical and %d logical channels\n" , |
3210 | rev, num_phy_chans, num_log_chans); |
3211 | |
3212 | base = devm_kzalloc(dev, |
3213 | ALIGN(sizeof(struct d40_base), 4) + |
3214 | (num_phy_chans + num_log_chans + num_memcpy_chans) * |
3215 | sizeof(struct d40_chan), GFP_KERNEL); |
3216 | |
3217 | if (!base) |
3218 | return -ENOMEM; |
3219 | |
3220 | base->rev = rev; |
3221 | base->clk = clk; |
3222 | base->num_memcpy_chans = num_memcpy_chans; |
3223 | base->num_phy_chans = num_phy_chans; |
3224 | base->num_log_chans = num_log_chans; |
3225 | base->virtbase = virtbase; |
3226 | base->plat_data = plat_data; |
3227 | base->dev = dev; |
3228 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); |
3229 | base->log_chans = &base->phy_chans[num_phy_chans]; |
3230 | |
3231 | if (base->plat_data->num_of_phy_chans == 14) { |
3232 | base->gen_dmac.backup = d40_backup_regs_v4b; |
3233 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; |
3234 | base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; |
3235 | base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; |
3236 | base->gen_dmac.realtime_en = D40_DREG_CRSEG1; |
3237 | base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; |
3238 | base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; |
3239 | base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; |
3240 | base->gen_dmac.il = il_v4b; |
3241 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); |
3242 | base->gen_dmac.init_reg = dma_init_reg_v4b; |
3243 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); |
3244 | } else { |
3245 | if (base->rev >= 3) { |
3246 | base->gen_dmac.backup = d40_backup_regs_v4a; |
3247 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; |
3248 | } |
3249 | base->gen_dmac.interrupt_en = D40_DREG_PCMIS; |
3250 | base->gen_dmac.interrupt_clear = D40_DREG_PCICR; |
3251 | base->gen_dmac.realtime_en = D40_DREG_RSEG1; |
3252 | base->gen_dmac.realtime_clear = D40_DREG_RCEG1; |
3253 | base->gen_dmac.high_prio_en = D40_DREG_PSEG1; |
3254 | base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; |
3255 | base->gen_dmac.il = il_v4a; |
3256 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); |
3257 | base->gen_dmac.init_reg = dma_init_reg_v4a; |
3258 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); |
3259 | } |
3260 | |
	base->phy_res = devm_kcalloc(dev, num_phy_chans,
				     sizeof(*base->phy_res),
				     GFP_KERNEL);
3264 | if (!base->phy_res) |
3265 | return -ENOMEM; |
3266 | |
	base->lookup_phy_chans = devm_kcalloc(dev, num_phy_chans,
					      sizeof(*base->lookup_phy_chans),
					      GFP_KERNEL);
3270 | if (!base->lookup_phy_chans) |
3271 | return -ENOMEM; |
3272 | |
	base->lookup_log_chans = devm_kcalloc(dev, num_log_chans,
					      sizeof(*base->lookup_log_chans),
					      GFP_KERNEL);
3276 | if (!base->lookup_log_chans) |
3277 | return -ENOMEM; |
3278 | |
	base->reg_val_backup_chan = devm_kmalloc_array(dev, base->num_phy_chans,
						       sizeof(d40_backup_regs_chan),
						       GFP_KERNEL);
3282 | if (!base->reg_val_backup_chan) |
3283 | return -ENOMEM; |
3284 | |
	base->lcla_pool.alloc_map = devm_kcalloc(dev, num_phy_chans
						 * D40_LCLA_LINK_PER_EVENT_GRP,
						 sizeof(*base->lcla_pool.alloc_map),
						 GFP_KERNEL);
3289 | if (!base->lcla_pool.alloc_map) |
3290 | return -ENOMEM; |
3291 | |
	base->regs_interrupt = devm_kmalloc_array(dev, base->gen_dmac.il_size,
						  sizeof(*base->regs_interrupt),
						  GFP_KERNEL);
3295 | if (!base->regs_interrupt) |
3296 | return -ENOMEM; |
3297 | |
	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
3301 | if (!base->desc_slab) |
3302 | return -ENOMEM; |
3303 | |
3304 | ret = devm_add_action_or_reset(dev, d40_drop_kmem_cache_action, |
3305 | base->desc_slab); |
3306 | if (ret) |
3307 | return ret; |
3308 | |
3309 | *retbase = base; |
3310 | |
3311 | return 0; |
3312 | } |
3313 | |
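/*
 * Write the controller's static setup: run the variant's init register
 * table, mark channels reserved for physical use as permanently active,
 * and set the default mode and interrupt masks for the rest.
 */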
3314 | static void __init d40_hw_init(struct d40_base *base) |
3315 | { |
3317 | int i; |
3318 | u32 prmseo[2] = {0, 0}; |
3319 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; |
3320 | u32 pcmis = 0; |
3321 | u32 pcicr = 0; |
3322 | struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; |
3323 | u32 reg_size = base->gen_dmac.init_reg_size; |
3324 | |
3325 | for (i = 0; i < reg_size; i++) |
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);
3328 | |
3329 | /* Configure all our dma channels to default settings */ |
3330 | for (i = 0; i < base->num_phy_chans; i++) { |
3331 | |
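		/*
		 * Mode/active state is packed two bits per channel across an
		 * even/odd register pair; iterate from the highest channel
		 * down so each left shift moves earlier fields into place.
		 */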
3332 | activeo[i % 2] = activeo[i % 2] << 2; |
3333 | |
3334 | if (base->phy_res[base->num_phy_chans - i - 1].allocated_src |
3335 | == D40_ALLOC_PHY) { |
3336 | activeo[i % 2] |= 3; |
3337 | continue; |
3338 | } |
3339 | |
		/* Enable the interrupt for this channel */
		pcmis = (pcmis << 1) | 1;

		/* Clear any pending interrupt for this channel */
		pcicr = (pcicr << 1) | 1;
3345 | |
		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}
3351 | |
	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupts to enable */
	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);

	/* Write which interrupts to clear */
	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3362 | |
3363 | /* These are __initdata and cannot be accessed after init */ |
3364 | base->gen_dmac.init_reg = NULL; |
3365 | base->gen_dmac.init_reg_size = 0; |
3366 | } |
3367 | |
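/*
 * Allocate backing memory for the logical channel link addresses (LCLA).
 * The hardware requires a 256 KiB aligned base, so try repeated page
 * allocations first and fall back to an oversized buffer aligned by hand.
 */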
3368 | static int __init d40_lcla_allocate(struct d40_base *base) |
3369 | { |
3370 | struct d40_lcla_pool *pool = &base->lcla_pool; |
3371 | unsigned long *page_list; |
3372 | int i, j; |
3373 | int ret; |
3374 | |
	/*
	 * This is somewhat ugly. We need 8192 bytes that are 2^18 (256 KiB)
	 * aligned. To fulfill this hardware requirement without wasting
	 * 256 KiB on padding, we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
				  sizeof(*page_list),
				  GFP_KERNEL);
3383 | if (!page_list) |
3384 | return -ENOMEM; |
3385 | |
	/* Calculate how many pages are required */
3387 | base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; |
3388 | |
3389 | for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { |
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
3392 | if (!page_list[i]) { |
3393 | |
			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);
3396 | ret = -ENOMEM; |
3397 | |
3398 | for (j = 0; j < i; j++) |
				free_pages(page_list[j], base->lcla_pool.pages);
3400 | goto free_page_list; |
3401 | } |
3402 | |
		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
3405 | break; |
3406 | } |
3407 | |
3408 | for (j = 0; j < i; j++) |
		free_pages(page_list[j], base->lcla_pool.pages);
3410 | |
3411 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { |
3412 | base->lcla_pool.base = (void *)page_list[i]; |
3413 | } else { |
		/*
		 * After many attempts with no success in finding the correct
		 * alignment, fall back to allocating one big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
3421 | base->lcla_pool.base_unaligned = kmalloc(SZ_1K * |
3422 | base->num_phy_chans + |
3423 | LCLA_ALIGNMENT, |
3424 | GFP_KERNEL); |
3425 | if (!base->lcla_pool.base_unaligned) { |
3426 | ret = -ENOMEM; |
3427 | goto free_page_list; |
3428 | } |
3429 | |
3430 | base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, |
3431 | LCLA_ALIGNMENT); |
3432 | } |
3433 | |
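	/*
	 * Map the pool for the device; DMA_TO_DEVICE because the controller
	 * only ever reads link items from it.
	 */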
3434 | pool->dma_addr = dma_map_single(base->dev, pool->base, |
3435 | SZ_1K * base->num_phy_chans, |
3436 | DMA_TO_DEVICE); |
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
3438 | pool->dma_addr = 0; |
3439 | ret = -ENOMEM; |
3440 | goto free_page_list; |
3441 | } |
3442 | |
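	/* Point the controller at the physical base of the LCLA pool. */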
	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
3445 | ret = 0; |
3446 | free_page_list: |
	kfree(page_list);
3448 | return ret; |
3449 | } |
3450 | |
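/*
 * Parse the device tree into a stedma40_platform_data. A minimal sketch
 * of the properties consumed here (the channel numbers are illustrative,
 * not taken from any particular board):
 *
 *	dma-controller {
 *		compatible = "stericsson,dma40";
 *		dma-channels = <8>;
 *		memcpy-channels = <56 57 58 59 60>;
 *		disabled-channels = <12>;
 *	};
 */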
3451 | static int __init d40_of_probe(struct device *dev, |
3452 | struct device_node *np) |
3453 | { |
3454 | struct stedma40_platform_data *pdata; |
3455 | int num_phy = 0, num_memcpy = 0, num_disabled = 0; |
3456 | const __be32 *list; |
3457 | |
	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3459 | if (!pdata) |
3460 | return -ENOMEM; |
3461 | |
	/* If absent, this value will be obtained from h/w. */
	of_property_read_u32(np, "dma-channels", &num_phy);
3464 | if (num_phy > 0) |
3465 | pdata->num_of_phy_chans = num_phy; |
3466 | |
	list = of_get_property(np, "memcpy-channels", &num_memcpy);
3468 | num_memcpy /= sizeof(*list); |
3469 | |
3470 | if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) { |
		d40_err(dev,
			"Invalid number of memcpy channels specified (%d)\n",
			num_memcpy);
3474 | return -EINVAL; |
3475 | } |
3476 | pdata->num_of_memcpy_chans = num_memcpy; |
3477 | |
	of_property_read_u32_array(np, "memcpy-channels",
				   dma40_memcpy_channels,
				   num_memcpy);
3481 | |
	list = of_get_property(np, "disabled-channels", &num_disabled);
3483 | num_disabled /= sizeof(*list); |
3484 | |
3485 | if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) { |
		d40_err(dev,
			"Invalid number of disabled channels specified (%d)\n",
			num_disabled);
3489 | return -EINVAL; |
3490 | } |
3491 | |
	of_property_read_u32_array(np, "disabled-channels",
				   pdata->disabled_channels,
				   num_disabled);
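	/* Terminate the disabled-channels vector with its -1 sentinel. */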
3495 | pdata->disabled_channels[num_disabled] = -1; |
3496 | |
3497 | dev->platform_data = pdata; |
3498 | |
3499 | return 0; |
3500 | } |
3501 | |
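/*
 * Probe the controller: detect and map the hardware, set up the LCPA and
 * LCLA regions, request the interrupt, enable runtime PM and register
 * with both the dmaengine core and the OF DMA framework.
 */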
3502 | static int __init d40_probe(struct platform_device *pdev) |
3503 | { |
3504 | struct device *dev = &pdev->dev; |
3505 | struct device_node *np = pdev->dev.of_node; |
3506 | struct device_node *np_lcpa; |
3507 | struct d40_base *base; |
3508 | struct resource *res; |
3509 | struct resource res_lcpa; |
3510 | int num_reserved_chans; |
3511 | u32 val; |
3512 | int ret; |
3513 | |
	ret = d40_of_probe(dev, np);
	if (ret)
		goto report_failure;
3518 | |
	ret = d40_hw_detect_init(pdev, &base);
3520 | if (ret) |
3521 | goto report_failure; |
3522 | |
3523 | num_reserved_chans = d40_phy_res_init(base); |
3524 | |
	platform_set_drvdata(pdev, base);
3526 | |
3527 | spin_lock_init(&base->interrupt_lock); |
3528 | spin_lock_init(&base->execmd_lock); |
3529 | |
3530 | /* Get IO for logical channel parameter address (LCPA) */ |
	np_lcpa = of_parse_phandle(np, "sram", 0);
	if (!np_lcpa) {
		dev_err(dev, "no LCPA SRAM node\n");
3534 | ret = -EINVAL; |
3535 | goto report_failure; |
3536 | } |
	/* The SRAM has no struct device, so read the address directly from the node */
	ret = of_address_to_resource(np_lcpa, 0, &res_lcpa);
3539 | if (ret) { |
		dev_err(dev, "no LCPA SRAM resource\n");
3541 | goto report_failure; |
3542 | } |
	base->lcpa_size = resource_size(&res_lcpa);
3544 | base->phy_lcpa = res_lcpa.start; |
	dev_info(dev, "found LCPA SRAM at %pad, size %pa\n",
		 &base->phy_lcpa, &base->lcpa_size);
3547 | |
	/* The LCPA lives in ESRAM; keep an address that boot code already set up. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (base->phy_lcpa != val && val != 0) {
		dev_warn(dev,
			 "[%s] Mismatch LCPA dma 0x%x, def %08x\n",
			 __func__, val, (u32)base->phy_lcpa);
	} else {
		writel(base->phy_lcpa, base->virtbase + D40_DREG_LCPA);
	}
3556 | |
	base->lcpa_base = devm_ioremap(dev, base->phy_lcpa, base->lcpa_size);
3558 | if (!base->lcpa_base) { |
3559 | ret = -ENOMEM; |
		d40_err(dev, "Failed to ioremap LCPA region\n");
3561 | goto report_failure; |
3562 | } |
3563 | /* If lcla has to be located in ESRAM we don't need to allocate */ |
3564 | if (base->plat_data->use_esram_lcla) { |
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
3567 | if (!res) { |
3568 | ret = -ENOENT; |
			d40_err(dev,
				"No \"lcla_esram\" memory resource\n");
3571 | goto report_failure; |
3572 | } |
		base->lcla_pool.base = devm_ioremap(dev, res->start,
						    resource_size(res));
3575 | if (!base->lcla_pool.base) { |
3576 | ret = -ENOMEM; |
			d40_err(dev, "Failed to ioremap LCLA region\n");
3578 | goto report_failure; |
3579 | } |
		writel(res->start, base->virtbase + D40_DREG_LCLA);
3581 | |
3582 | } else { |
3583 | ret = d40_lcla_allocate(base); |
3584 | if (ret) { |
3585 | d40_err(dev, "Failed to allocate LCLA area\n" ); |
3586 | goto destroy_cache; |
3587 | } |
3588 | } |
3589 | |
3590 | spin_lock_init(&base->lcla_pool.lock); |
3591 | |
3592 | base->irq = platform_get_irq(pdev, 0); |
3593 | if (base->irq < 0) { |
3594 | ret = base->irq; |
3595 | goto destroy_cache; |
3596 | } |
3597 | |
	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(dev, "Failed to request IRQ\n");
3601 | goto destroy_cache; |
3602 | } |
3603 | |
3604 | if (base->plat_data->use_esram_lcla) { |
3605 | |
		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(dev, "Failed to get lcpa_regulator\n");
			ret = PTR_ERR(base->lcpa_regulator);
3610 | base->lcpa_regulator = NULL; |
3611 | goto destroy_cache; |
3612 | } |
3613 | |
		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
3619 | base->lcpa_regulator = NULL; |
3620 | goto destroy_cache; |
3621 | } |
3622 | } |
3623 | |
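	/* Enable the clocks to all channel groups (global clock control). */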
3624 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); |
3625 | |
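	/*
	 * Enable runtime PM with autosuspend; marking the device IRQ safe
	 * allows runtime PM calls from the interrupt handler.
	 */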
	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_mark_last_busy(base->dev);
	pm_runtime_set_active(base->dev);
	pm_runtime_enable(base->dev);
3632 | |
3633 | ret = d40_dmaengine_init(base, num_reserved_chans); |
3634 | if (ret) |
3635 | goto destroy_cache; |
3636 | |
	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (ret) {
		d40_err(dev, "Failed to set dma max seg size\n");
3640 | goto destroy_cache; |
3641 | } |
3642 | |
3643 | d40_hw_init(base); |
3644 | |
	ret = of_dma_controller_register(np, d40_xlate, NULL);
	if (ret) {
		dev_err(dev,
			"could not register of_dma_controller\n");
3649 | goto destroy_cache; |
3650 | } |
3651 | |
	dev_info(base->dev, "initialized\n");
3653 | return 0; |
3654 | |
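/*
 * devm-managed allocations are released automatically on failure; the
 * LCLA pool mapping and pages, the regulator and runtime PM are unwound
 * by hand below.
 */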
3655 | destroy_cache: |
3656 | if (base->lcla_pool.dma_addr) |
3657 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, |
3658 | SZ_1K * base->num_phy_chans, |
3659 | DMA_TO_DEVICE); |
3660 | |
3661 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
		free_pages((unsigned long)base->lcla_pool.base,
			   base->lcla_pool.pages);
3664 | |
	kfree(base->lcla_pool.base_unaligned);
3666 | |
3667 | if (base->lcpa_regulator) { |
		regulator_disable(base->lcpa_regulator);
		regulator_put(base->lcpa_regulator);
3670 | } |
	pm_runtime_disable(base->dev);
3672 | |
3673 | report_failure: |
	d40_err(dev, "probe failed\n");
3675 | return ret; |
3676 | } |
3677 | |
3678 | static const struct of_device_id d40_match[] = { |
	{ .compatible = "stericsson,dma40", },
3680 | {} |
3681 | }; |
3682 | |
3683 | static struct platform_driver d40_driver = { |
3684 | .driver = { |
3685 | .name = D40_NAME, |
3686 | .pm = &dma40_pm_ops, |
3687 | .of_match_table = d40_match, |
3688 | }, |
3689 | }; |
3690 | |
3691 | static int __init stedma40_init(void) |
3692 | { |
3693 | return platform_driver_probe(&d40_driver, d40_probe); |
3694 | } |
3695 | subsys_initcall(stedma40_init); |
3696 | |