1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) |
4 | * |
5 | * Copyright (C) 2008 Atmel Corporation |
6 | * Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries |
7 | * |
8 | * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. |
9 | * The only Atmel DMA Controller that is not covered by this driver is the one |
10 | * found on AT91SAM9263. |
11 | */ |
12 | |
13 | #include <dt-bindings/dma/at91.h> |
14 | #include <linux/bitfield.h> |
15 | #include <linux/clk.h> |
16 | #include <linux/dmaengine.h> |
17 | #include <linux/dmapool.h> |
18 | #include <linux/dma-mapping.h> |
19 | #include <linux/interrupt.h> |
20 | #include <linux/module.h> |
21 | #include <linux/of.h> |
22 | #include <linux/overflow.h> |
23 | #include <linux/of_platform.h> |
24 | #include <linux/of_dma.h> |
25 | #include <linux/platform_device.h> |
26 | #include <linux/slab.h> |
27 | |
28 | #include "dmaengine.h" |
29 | #include "virt-dma.h" |
30 | |
31 | /* |
32 | * Glossary |
33 | * -------- |
34 | * |
35 | * at_hdmac : Name of the ATmel AHB DMA Controller |
36 | * at_dma_ / atdma : ATmel DMA controller entity related |
37 | * atc_ / atchan : ATmel DMA Channel entity related |
38 | */ |
39 | |
40 | #define AT_DMA_MAX_NR_CHANNELS 8 |
41 | |
42 | /* Global Configuration Register */ |
43 | #define AT_DMA_GCFG 0x00 |
44 | #define AT_DMA_IF_BIGEND(i) BIT((i)) /* AHB-Lite Interface i in Big-endian mode */ |
45 | #define AT_DMA_ARB_CFG BIT(4) /* Arbiter mode. */ |
46 | |
47 | /* Controller Enable Register */ |
48 | #define AT_DMA_EN 0x04 |
49 | #define AT_DMA_ENABLE BIT(0) |
50 | |
51 | /* Software Single Request Register */ |
52 | #define AT_DMA_SREQ 0x08 |
53 | #define AT_DMA_SSREQ(x) BIT((x) << 1) /* Request a source single transfer on channel x */ |
54 | #define AT_DMA_DSREQ(x) BIT(1 + ((x) << 1)) /* Request a destination single transfer on channel x */ |
55 | |
56 | /* Software Chunk Transfer Request Register */ |
57 | #define AT_DMA_CREQ 0x0c |
58 | #define AT_DMA_SCREQ(x) BIT((x) << 1) /* Request a source chunk transfer on channel x */ |
59 | #define AT_DMA_DCREQ(x) BIT(1 + ((x) << 1)) /* Request a destination chunk transfer on channel x */ |
60 | |
61 | /* Software Last Transfer Flag Register */ |
62 | #define AT_DMA_LAST 0x10 |
63 | #define AT_DMA_SLAST(x) BIT((x) << 1) /* This src rq is last tx of buffer on channel x */ |
64 | #define AT_DMA_DLAST(x) BIT(1 + ((x) << 1)) /* This dst rq is last tx of buffer on channel x */ |
65 | |
66 | /* Request Synchronization Register */ |
67 | #define AT_DMA_SYNC 0x14 |
68 | #define AT_DMA_SYR(h) BIT((h)) /* Synchronize handshake line h */ |
69 | |
70 | /* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */ |
71 | #define AT_DMA_EBCIER 0x18 /* Enable register */ |
72 | #define AT_DMA_EBCIDR 0x1c /* Disable register */ |
73 | #define AT_DMA_EBCIMR 0x20 /* Mask Register */ |
74 | #define AT_DMA_EBCISR 0x24 /* Status Register */ |
75 | #define AT_DMA_CBTC_OFFSET 8 |
76 | #define AT_DMA_ERR_OFFSET 16 |
77 | #define AT_DMA_BTC(x) BIT((x)) |
78 | #define AT_DMA_CBTC(x) BIT(AT_DMA_CBTC_OFFSET + (x)) |
79 | #define AT_DMA_ERR(x) BIT(AT_DMA_ERR_OFFSET + (x)) |
80 | |
81 | /* Channel Handler Enable Register */ |
82 | #define AT_DMA_CHER 0x28 |
83 | #define AT_DMA_ENA(x) BIT((x)) |
84 | #define AT_DMA_SUSP(x) BIT(8 + (x)) |
85 | #define AT_DMA_KEEP(x) BIT(24 + (x)) |
86 | |
87 | /* Channel Handler Disable Register */ |
88 | #define AT_DMA_CHDR 0x2c |
89 | #define AT_DMA_DIS(x) BIT(x) |
90 | #define AT_DMA_RES(x) BIT(8 + (x)) |
91 | |
92 | /* Channel Handler Status Register */ |
93 | #define AT_DMA_CHSR 0x30 |
94 | #define AT_DMA_EMPT(x) BIT(16 + (x)) |
95 | #define AT_DMA_STAL(x) BIT(24 + (x)) |
96 | |
97 | /* Channel registers base address */ |
98 | #define AT_DMA_CH_REGS_BASE 0x3c |
99 | #define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */ |
100 | |
101 | /* Hardware register offset for each channel */ |
102 | #define ATC_SADDR_OFFSET 0x00 /* Source Address Register */ |
103 | #define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */ |
104 | #define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */ |
105 | #define ATC_CTRLA_OFFSET 0x0c /* Control A Register */ |
106 | #define ATC_CTRLB_OFFSET 0x10 /* Control B Register */ |
107 | #define ATC_CFG_OFFSET 0x14 /* Configuration Register */ |
108 | #define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */ |
109 | #define ATC_DPIP_OFFSET 0x1c /* Dst PIP Configuration Register */ |
110 | |
111 | |
112 | /* Bitfield definitions */ |
113 | |
114 | /* Bitfields in DSCR */ |
115 | #define ATC_DSCR_IF GENMASK(1, 0) /* Dsc feched via AHB-Lite Interface */ |
116 | |
117 | /* Bitfields in CTRLA */ |
118 | #define ATC_BTSIZE_MAX GENMASK(15, 0) /* Maximum Buffer Transfer Size */ |
119 | #define ATC_BTSIZE GENMASK(15, 0) /* Buffer Transfer Size */ |
120 | #define ATC_SCSIZE GENMASK(18, 16) /* Source Chunk Transfer Size */ |
121 | #define ATC_DCSIZE GENMASK(22, 20) /* Destination Chunk Transfer Size */ |
122 | #define ATC_SRC_WIDTH GENMASK(25, 24) /* Source Single Transfer Size */ |
123 | #define ATC_DST_WIDTH GENMASK(29, 28) /* Destination Single Transfer Size */ |
124 | #define ATC_DONE BIT(31) /* Tx Done (only written back in descriptor) */ |
125 | |
126 | /* Bitfields in CTRLB */ |
127 | #define ATC_SIF GENMASK(1, 0) /* Src tx done via AHB-Lite Interface i */ |
128 | #define ATC_DIF GENMASK(5, 4) /* Dst tx done via AHB-Lite Interface i */ |
129 | #define AT_DMA_MEM_IF 0x0 /* interface 0 as memory interface */ |
130 | #define AT_DMA_PER_IF 0x1 /* interface 1 as peripheral interface */ |
131 | #define ATC_SRC_PIP BIT(8) /* Source Picture-in-Picture enabled */ |
132 | #define ATC_DST_PIP BIT(12) /* Destination Picture-in-Picture enabled */ |
133 | #define ATC_SRC_DSCR_DIS BIT(16) /* Src Descriptor fetch disable */ |
134 | #define ATC_DST_DSCR_DIS BIT(20) /* Dst Descriptor fetch disable */ |
135 | #define ATC_FC GENMASK(23, 21) /* Choose Flow Controller */ |
136 | #define ATC_FC_MEM2MEM 0x0 /* Mem-to-Mem (DMA) */ |
137 | #define ATC_FC_MEM2PER 0x1 /* Mem-to-Periph (DMA) */ |
138 | #define ATC_FC_PER2MEM 0x2 /* Periph-to-Mem (DMA) */ |
139 | #define ATC_FC_PER2PER 0x3 /* Periph-to-Periph (DMA) */ |
140 | #define ATC_FC_PER2MEM_PER 0x4 /* Periph-to-Mem (Peripheral) */ |
141 | #define ATC_FC_MEM2PER_PER 0x5 /* Mem-to-Periph (Peripheral) */ |
142 | #define ATC_FC_PER2PER_SRCPER 0x6 /* Periph-to-Periph (Src Peripheral) */ |
143 | #define ATC_FC_PER2PER_DSTPER 0x7 /* Periph-to-Periph (Dst Peripheral) */ |
144 | #define ATC_SRC_ADDR_MODE GENMASK(25, 24) |
145 | #define ATC_SRC_ADDR_MODE_INCR 0x0 /* Incrementing Mode */ |
146 | #define ATC_SRC_ADDR_MODE_DECR 0x1 /* Decrementing Mode */ |
147 | #define ATC_SRC_ADDR_MODE_FIXED 0x2 /* Fixed Mode */ |
148 | #define ATC_DST_ADDR_MODE GENMASK(29, 28) |
149 | #define ATC_DST_ADDR_MODE_INCR 0x0 /* Incrementing Mode */ |
150 | #define ATC_DST_ADDR_MODE_DECR 0x1 /* Decrementing Mode */ |
151 | #define ATC_DST_ADDR_MODE_FIXED 0x2 /* Fixed Mode */ |
152 | #define ATC_IEN BIT(30) /* BTC interrupt enable (active low) */ |
153 | #define ATC_AUTO BIT(31) /* Auto multiple buffer tx enable */ |
154 | |
155 | /* Bitfields in CFG */ |
156 | #define ATC_SRC_PER GENMASK(3, 0) /* Channel src rq associated with periph handshaking ifc h */ |
157 | #define ATC_DST_PER GENMASK(7, 4) /* Channel dst rq associated with periph handshaking ifc h */ |
158 | #define ATC_SRC_REP BIT(8) /* Source Replay Mod */ |
159 | #define ATC_SRC_H2SEL BIT(9) /* Source Handshaking Mod */ |
160 | #define ATC_SRC_PER_MSB GENMASK(11, 10) /* Channel src rq (most significant bits) */ |
161 | #define ATC_DST_REP BIT(12) /* Destination Replay Mod */ |
162 | #define ATC_DST_H2SEL BIT(13) /* Destination Handshaking Mod */ |
163 | #define ATC_DST_PER_MSB GENMASK(15, 14) /* Channel dst rq (most significant bits) */ |
164 | #define ATC_SOD BIT(16) /* Stop On Done */ |
165 | #define ATC_LOCK_IF BIT(20) /* Interface Lock */ |
166 | #define ATC_LOCK_B BIT(21) /* AHB Bus Lock */ |
167 | #define ATC_LOCK_IF_L BIT(22) /* Master Interface Arbiter Lock */ |
168 | #define ATC_AHB_PROT GENMASK(26, 24) /* AHB Protection */ |
169 | #define ATC_FIFOCFG GENMASK(29, 28) /* FIFO Request Configuration */ |
170 | #define ATC_FIFOCFG_LARGESTBURST 0x0 |
171 | #define ATC_FIFOCFG_HALFFIFO 0x1 |
172 | #define ATC_FIFOCFG_ENOUGHSPACE 0x2 |
173 | |
174 | /* Bitfields in SPIP */ |
175 | #define ATC_SPIP_HOLE GENMASK(15, 0) |
176 | #define ATC_SPIP_BOUNDARY GENMASK(25, 16) |
177 | |
178 | /* Bitfields in DPIP */ |
179 | #define ATC_DPIP_HOLE GENMASK(15, 0) |
180 | #define ATC_DPIP_BOUNDARY GENMASK(25, 16) |
181 | |
182 | #define ATC_PER_MSB GENMASK(5, 4) /* Extract MSBs of a handshaking identifier */ |
183 | #define ATC_SRC_PER_ID(id) \ |
184 | ({ typeof(id) _id = (id); \ |
185 | FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \ |
186 | FIELD_PREP(ATC_SRC_PER, _id); }) |
187 | #define ATC_DST_PER_ID(id) \ |
188 | ({ typeof(id) _id = (id); \ |
189 | FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \ |
190 | FIELD_PREP(ATC_DST_PER, _id); }) |
191 | |
192 | |
193 | |
194 | /*-- descriptors -----------------------------------------------------*/ |
195 | |
196 | /* LLI == Linked List Item; aka DMA buffer descriptor */ |
197 | struct at_lli { |
198 | /* values that are not changed by hardware */ |
199 | u32 saddr; |
200 | u32 daddr; |
201 | /* value that may get written back: */ |
202 | u32 ctrla; |
203 | /* more values that are not changed by hardware */ |
204 | u32 ctrlb; |
205 | u32 dscr; /* chain to next lli */ |
206 | }; |
207 | |
208 | /** |
209 | * struct atdma_sg - atdma scatter gather entry |
210 | * @len: length of the current Linked List Item. |
211 | * @lli: linked list item that is passed to the DMA controller |
212 | * @lli_phys: physical address of the LLI. |
213 | */ |
214 | struct atdma_sg { |
215 | unsigned int len; |
216 | struct at_lli *lli; |
217 | dma_addr_t lli_phys; |
218 | }; |
219 | |
220 | /** |
221 | * struct at_desc - software descriptor |
222 | * @vd: pointer to the virtual dma descriptor. |
223 | * @atchan: pointer to the atmel dma channel. |
224 | * @total_len: total transaction byte count |
225 | * @sglen: number of sg entries. |
226 | * @sg: array of sgs. |
227 | * @boundary: number of transfers to perform before the automatic address increment operation |
228 | * @dst_hole: value to add to the destination address when the boundary has been reached |
229 | * @src_hole: value to add to the source address when the boundary has been reached |
230 | * @memset_buffer: buffer used for the memset operation |
231 | * @memset_paddr: physical address of the buffer used for the memset operation |
232 | * @memset_vaddr: virtual address of the buffer used for the memset operation |
233 | */ |
234 | struct at_desc { |
235 | struct virt_dma_desc vd; |
236 | struct at_dma_chan *atchan; |
237 | size_t total_len; |
238 | unsigned int sglen; |
239 | /* Interleaved data */ |
240 | size_t boundary; |
241 | size_t dst_hole; |
242 | size_t src_hole; |
243 | |
244 | /* Memset temporary buffer */ |
245 | bool memset_buffer; |
246 | dma_addr_t memset_paddr; |
247 | int *memset_vaddr; |
248 | struct atdma_sg sg[] __counted_by(sglen); |
249 | }; |
250 | |
251 | /*-- Channels --------------------------------------------------------*/ |
252 | |
253 | /** |
254 | * enum atc_status - information bits stored in channel status flag |
255 | * |
256 | * @ATC_IS_PAUSED: If channel is pauses |
257 | * @ATC_IS_CYCLIC: If channel is cyclic |
258 | * |
259 | * Manipulated with atomic operations. |
260 | */ |
261 | enum atc_status { |
262 | ATC_IS_PAUSED = 1, |
263 | ATC_IS_CYCLIC = 24, |
264 | }; |
265 | |
266 | /** |
267 | * struct at_dma_chan - internal representation of an Atmel HDMAC channel |
268 | * @vc: virtual dma channel entry. |
269 | * @atdma: pointer to the driver data. |
270 | * @ch_regs: memory mapped register base |
271 | * @mask: channel index in a mask |
272 | * @per_if: peripheral interface |
273 | * @mem_if: memory interface |
274 | * @status: transmit status information from irq/prep* functions |
275 | * to tasklet (use atomic operations) |
276 | * @save_cfg: configuration register that is saved on suspend/resume cycle |
277 | * @save_dscr: for cyclic operations, preserve next descriptor address in |
278 | * the cyclic list on suspend/resume cycle |
279 | * @dma_sconfig: configuration for slave transfers, passed via |
280 | * .device_config |
281 | * @desc: pointer to the atmel dma descriptor. |
282 | */ |
283 | struct at_dma_chan { |
284 | struct virt_dma_chan vc; |
285 | struct at_dma *atdma; |
286 | void __iomem *ch_regs; |
287 | u8 mask; |
288 | u8 per_if; |
289 | u8 mem_if; |
290 | unsigned long status; |
291 | u32 save_cfg; |
292 | u32 save_dscr; |
293 | struct dma_slave_config dma_sconfig; |
294 | struct at_desc *desc; |
295 | }; |
296 | |
297 | #define channel_readl(atchan, name) \ |
298 | __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET) |
299 | |
300 | #define channel_writel(atchan, name, val) \ |
301 | __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET) |
302 | |
303 | /* |
304 | * Fix sconfig's burst size according to at_hdmac. We need to convert them as: |
305 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7. |
306 | * |
307 | * This can be done by finding most significant bit set. |
308 | */ |
309 | static inline void convert_burst(u32 *maxburst) |
310 | { |
311 | if (*maxburst > 1) |
312 | *maxburst = fls(x: *maxburst) - 2; |
313 | else |
314 | *maxburst = 0; |
315 | } |
316 | |
317 | /* |
318 | * Fix sconfig's bus width according to at_hdmac. |
319 | * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2. |
320 | */ |
321 | static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width) |
322 | { |
323 | switch (addr_width) { |
324 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
325 | return 1; |
326 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
327 | return 2; |
328 | default: |
329 | /* For 1 byte width or fallback */ |
330 | return 0; |
331 | } |
332 | } |
333 | |
334 | /*-- Controller ------------------------------------------------------*/ |
335 | |
336 | /** |
337 | * struct at_dma - internal representation of an Atmel HDMA Controller |
338 | * @dma_device: dmaengine dma_device object members |
339 | * @regs: memory mapped register base |
340 | * @clk: dma controller clock |
341 | * @save_imr: interrupt mask register that is saved on suspend/resume cycle |
342 | * @all_chan_mask: all channels availlable in a mask |
343 | * @lli_pool: hw lli table |
344 | * @memset_pool: hw memset pool |
345 | * @chan: channels table to store at_dma_chan structures |
346 | */ |
347 | struct at_dma { |
348 | struct dma_device dma_device; |
349 | void __iomem *regs; |
350 | struct clk *clk; |
351 | u32 save_imr; |
352 | |
353 | u8 all_chan_mask; |
354 | |
355 | struct dma_pool *lli_pool; |
356 | struct dma_pool *memset_pool; |
357 | /* AT THE END channels table */ |
358 | struct at_dma_chan chan[]; |
359 | }; |
360 | |
361 | #define dma_readl(atdma, name) \ |
362 | __raw_readl((atdma)->regs + AT_DMA_##name) |
363 | #define dma_writel(atdma, name, val) \ |
364 | __raw_writel((val), (atdma)->regs + AT_DMA_##name) |
365 | |
366 | static inline struct at_desc *to_atdma_desc(struct dma_async_tx_descriptor *t) |
367 | { |
368 | return container_of(t, struct at_desc, vd.tx); |
369 | } |
370 | |
371 | static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *chan) |
372 | { |
373 | return container_of(chan, struct at_dma_chan, vc.chan); |
374 | } |
375 | |
376 | static inline struct at_dma *to_at_dma(struct dma_device *ddev) |
377 | { |
378 | return container_of(ddev, struct at_dma, dma_device); |
379 | } |
380 | |
381 | |
382 | /*-- Helper functions ------------------------------------------------*/ |
383 | |
384 | static struct device *chan2dev(struct dma_chan *chan) |
385 | { |
386 | return &chan->dev->device; |
387 | } |
388 | |
389 | #if defined(VERBOSE_DEBUG) |
390 | static void vdbg_dump_regs(struct at_dma_chan *atchan) |
391 | { |
392 | struct at_dma *atdma = to_at_dma(ddev: atchan->vc.chan.device); |
393 | |
394 | dev_err(chan2dev(&atchan->vc.chan), |
395 | " channel %d : imr = 0x%x, chsr = 0x%x\n" , |
396 | atchan->vc.chan.chan_id, |
397 | dma_readl(atdma, EBCIMR), |
398 | dma_readl(atdma, CHSR)); |
399 | |
400 | dev_err(chan2dev(&atchan->vc.chan), |
401 | " channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n" , |
402 | channel_readl(atchan, SADDR), |
403 | channel_readl(atchan, DADDR), |
404 | channel_readl(atchan, CTRLA), |
405 | channel_readl(atchan, CTRLB), |
406 | channel_readl(atchan, CFG), |
407 | channel_readl(atchan, DSCR)); |
408 | } |
409 | #else |
410 | static void vdbg_dump_regs(struct at_dma_chan *atchan) {} |
411 | #endif |
412 | |
413 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) |
414 | { |
415 | dev_crit(chan2dev(&atchan->vc.chan), |
416 | "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n" , |
417 | &lli->saddr, &lli->daddr, |
418 | lli->ctrla, lli->ctrlb, &lli->dscr); |
419 | } |
420 | |
421 | |
422 | static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on) |
423 | { |
424 | u32 ebci; |
425 | |
426 | /* enable interrupts on buffer transfer completion & error */ |
427 | ebci = AT_DMA_BTC(chan_id) |
428 | | AT_DMA_ERR(chan_id); |
429 | if (on) |
430 | dma_writel(atdma, EBCIER, ebci); |
431 | else |
432 | dma_writel(atdma, EBCIDR, ebci); |
433 | } |
434 | |
435 | static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id) |
436 | { |
437 | atc_setup_irq(atdma, chan_id, on: 1); |
438 | } |
439 | |
440 | static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id) |
441 | { |
442 | atc_setup_irq(atdma, chan_id, on: 0); |
443 | } |
444 | |
445 | |
446 | /** |
447 | * atc_chan_is_enabled - test if given channel is enabled |
448 | * @atchan: channel we want to test status |
449 | */ |
450 | static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) |
451 | { |
452 | struct at_dma *atdma = to_at_dma(ddev: atchan->vc.chan.device); |
453 | |
454 | return !!(dma_readl(atdma, CHSR) & atchan->mask); |
455 | } |
456 | |
457 | /** |
458 | * atc_chan_is_paused - test channel pause/resume status |
459 | * @atchan: channel we want to test status |
460 | */ |
461 | static inline int atc_chan_is_paused(struct at_dma_chan *atchan) |
462 | { |
463 | return test_bit(ATC_IS_PAUSED, &atchan->status); |
464 | } |
465 | |
466 | /** |
467 | * atc_chan_is_cyclic - test if given channel has cyclic property set |
468 | * @atchan: channel we want to test status |
469 | */ |
470 | static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan) |
471 | { |
472 | return test_bit(ATC_IS_CYCLIC, &atchan->status); |
473 | } |
474 | |
475 | /** |
476 | * set_lli_eol - set end-of-link to descriptor so it will end transfer |
477 | * @desc: descriptor, signle or at the end of a chain, to end chain on |
478 | * @i: index of the atmel scatter gather entry that is at the end of the chain. |
479 | */ |
480 | static void set_lli_eol(struct at_desc *desc, unsigned int i) |
481 | { |
482 | u32 ctrlb = desc->sg[i].lli->ctrlb; |
483 | |
484 | ctrlb &= ~ATC_IEN; |
485 | ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; |
486 | |
487 | desc->sg[i].lli->ctrlb = ctrlb; |
488 | desc->sg[i].lli->dscr = 0; |
489 | } |
490 | |
491 | #define ATC_DEFAULT_CFG FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO) |
492 | #define ATC_DEFAULT_CTRLB (FIELD_PREP(ATC_SIF, AT_DMA_MEM_IF) | \ |
493 | FIELD_PREP(ATC_DIF, AT_DMA_MEM_IF)) |
494 | #define ATC_DMA_BUSWIDTHS\ |
495 | (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ |
496 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ |
497 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ |
498 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) |
499 | |
500 | #define ATC_MAX_DSCR_TRIALS 10 |
501 | |
502 | /* |
503 | * Initial number of descriptors to allocate for each channel. This could |
504 | * be increased during dma usage. |
505 | */ |
506 | static unsigned int init_nr_desc_per_channel = 64; |
507 | module_param(init_nr_desc_per_channel, uint, 0644); |
508 | MODULE_PARM_DESC(init_nr_desc_per_channel, |
509 | "initial descriptors per channel (default: 64)" ); |
510 | |
511 | /** |
512 | * struct at_dma_platform_data - Controller configuration parameters |
513 | * @nr_channels: Number of channels supported by hardware (max 8) |
514 | * @cap_mask: dma_capability flags supported by the platform |
515 | */ |
516 | struct at_dma_platform_data { |
517 | unsigned int nr_channels; |
518 | dma_cap_mask_t cap_mask; |
519 | }; |
520 | |
521 | /** |
522 | * struct at_dma_slave - Controller-specific information about a slave |
523 | * @dma_dev: required DMA master device |
524 | * @cfg: Platform-specific initializer for the CFG register |
525 | */ |
526 | struct at_dma_slave { |
527 | struct device *dma_dev; |
528 | u32 cfg; |
529 | }; |
530 | |
531 | static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst, |
532 | size_t len) |
533 | { |
534 | unsigned int width; |
535 | |
536 | if (!((src | dst | len) & 3)) |
537 | width = 2; |
538 | else if (!((src | dst | len) & 1)) |
539 | width = 1; |
540 | else |
541 | width = 0; |
542 | |
543 | return width; |
544 | } |
545 | |
546 | static void atdma_lli_chain(struct at_desc *desc, unsigned int i) |
547 | { |
548 | struct atdma_sg *atdma_sg = &desc->sg[i]; |
549 | |
550 | if (i) |
551 | desc->sg[i - 1].lli->dscr = atdma_sg->lli_phys; |
552 | } |
553 | |
554 | /** |
555 | * atc_dostart - starts the DMA engine for real |
556 | * @atchan: the channel we want to start |
557 | */ |
558 | static void atc_dostart(struct at_dma_chan *atchan) |
559 | { |
560 | struct virt_dma_desc *vd = vchan_next_desc(vc: &atchan->vc); |
561 | struct at_desc *desc; |
562 | |
563 | if (!vd) { |
564 | atchan->desc = NULL; |
565 | return; |
566 | } |
567 | |
568 | vdbg_dump_regs(atchan); |
569 | |
570 | list_del(entry: &vd->node); |
571 | atchan->desc = desc = to_atdma_desc(t: &vd->tx); |
572 | |
573 | channel_writel(atchan, SADDR, 0); |
574 | channel_writel(atchan, DADDR, 0); |
575 | channel_writel(atchan, CTRLA, 0); |
576 | channel_writel(atchan, CTRLB, 0); |
577 | channel_writel(atchan, DSCR, desc->sg[0].lli_phys); |
578 | channel_writel(atchan, SPIP, |
579 | FIELD_PREP(ATC_SPIP_HOLE, desc->src_hole) | |
580 | FIELD_PREP(ATC_SPIP_BOUNDARY, desc->boundary)); |
581 | channel_writel(atchan, DPIP, |
582 | FIELD_PREP(ATC_DPIP_HOLE, desc->dst_hole) | |
583 | FIELD_PREP(ATC_DPIP_BOUNDARY, desc->boundary)); |
584 | |
585 | /* Don't allow CPU to reorder channel enable. */ |
586 | wmb(); |
587 | dma_writel(atchan->atdma, CHER, atchan->mask); |
588 | |
589 | vdbg_dump_regs(atchan); |
590 | } |
591 | |
592 | static void atdma_desc_free(struct virt_dma_desc *vd) |
593 | { |
594 | struct at_dma *atdma = to_at_dma(ddev: vd->tx.chan->device); |
595 | struct at_desc *desc = to_atdma_desc(t: &vd->tx); |
596 | unsigned int i; |
597 | |
598 | for (i = 0; i < desc->sglen; i++) { |
599 | if (desc->sg[i].lli) |
600 | dma_pool_free(pool: atdma->lli_pool, vaddr: desc->sg[i].lli, |
601 | addr: desc->sg[i].lli_phys); |
602 | } |
603 | |
604 | /* If the transfer was a memset, free our temporary buffer */ |
605 | if (desc->memset_buffer) { |
606 | dma_pool_free(pool: atdma->memset_pool, vaddr: desc->memset_vaddr, |
607 | addr: desc->memset_paddr); |
608 | desc->memset_buffer = false; |
609 | } |
610 | |
611 | kfree(objp: desc); |
612 | } |
613 | |
614 | /** |
615 | * atc_calc_bytes_left - calculates the number of bytes left according to the |
616 | * value read from CTRLA. |
617 | * |
618 | * @current_len: the number of bytes left before reading CTRLA |
619 | * @ctrla: the value of CTRLA |
620 | */ |
621 | static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla) |
622 | { |
623 | u32 btsize = FIELD_GET(ATC_BTSIZE, ctrla); |
624 | u32 src_width = FIELD_GET(ATC_SRC_WIDTH, ctrla); |
625 | |
626 | /* |
627 | * According to the datasheet, when reading the Control A Register |
628 | * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the |
629 | * number of transfers completed on the Source Interface. |
630 | * So btsize is always a number of source width transfers. |
631 | */ |
632 | return current_len - (btsize << src_width); |
633 | } |
634 | |
635 | /** |
636 | * atc_get_llis_residue - Get residue for a hardware linked list transfer |
637 | * @atchan: pointer to an atmel hdmac channel. |
638 | * @desc: pointer to the descriptor for which the residue is calculated. |
639 | * @residue: residue to be set to dma_tx_state. |
640 | * |
641 | * Calculate the residue by removing the length of the Linked List Item (LLI) |
642 | * already transferred from the total length. To get the current LLI we can use |
643 | * the value of the channel's DSCR register and compare it against the DSCR |
644 | * value of each LLI. |
645 | * |
646 | * The CTRLA register provides us with the amount of data already read from the |
647 | * source for the LLI. So we can compute a more accurate residue by also |
648 | * removing the number of bytes corresponding to this amount of data. |
649 | * |
650 | * However, the DSCR and CTRLA registers cannot be read both atomically. Hence a |
651 | * race condition may occur: the first read register may refer to one LLI |
652 | * whereas the second read may refer to a later LLI in the list because of the |
653 | * DMA transfer progression inbetween the two reads. |
654 | * |
655 | * One solution could have been to pause the DMA transfer, read the DSCR and |
656 | * CTRLA then resume the DMA transfer. Nonetheless, this approach presents some |
657 | * drawbacks: |
658 | * - If the DMA transfer is paused, RX overruns or TX underruns are more likey |
659 | * to occur depending on the system latency. Taking the USART driver as an |
660 | * example, it uses a cyclic DMA transfer to read data from the Receive |
661 | * Holding Register (RHR) to avoid RX overruns since the RHR is not protected |
662 | * by any FIFO on most Atmel SoCs. So pausing the DMA transfer to compute the |
663 | * residue would break the USART driver design. |
664 | * - The atc_pause() function masks interrupts but we'd rather avoid to do so |
665 | * for system latency purpose. |
666 | * |
667 | * Then we'd rather use another solution: the DSCR is read a first time, the |
668 | * CTRLA is read in turn, next the DSCR is read a second time. If the two |
669 | * consecutive read values of the DSCR are the same then we assume both refers |
670 | * to the very same LLI as well as the CTRLA value read inbetween does. For |
671 | * cyclic tranfers, the assumption is that a full loop is "not so fast". If the |
672 | * two DSCR values are different, we read again the CTRLA then the DSCR till two |
673 | * consecutive read values from DSCR are equal or till the maximum trials is |
674 | * reach. This algorithm is very unlikely not to find a stable value for DSCR. |
675 | * |
676 | * Returns: %0 on success, -errno otherwise. |
677 | */ |
678 | static int atc_get_llis_residue(struct at_dma_chan *atchan, |
679 | struct at_desc *desc, u32 *residue) |
680 | { |
681 | u32 len, ctrla, dscr; |
682 | unsigned int i; |
683 | |
684 | len = desc->total_len; |
685 | dscr = channel_readl(atchan, DSCR); |
686 | rmb(); /* ensure DSCR is read before CTRLA */ |
687 | ctrla = channel_readl(atchan, CTRLA); |
688 | for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) { |
689 | u32 new_dscr; |
690 | |
691 | rmb(); /* ensure DSCR is read after CTRLA */ |
692 | new_dscr = channel_readl(atchan, DSCR); |
693 | |
694 | /* |
695 | * If the DSCR register value has not changed inside the DMA |
696 | * controller since the previous read, we assume that both the |
697 | * dscr and ctrla values refers to the very same descriptor. |
698 | */ |
699 | if (likely(new_dscr == dscr)) |
700 | break; |
701 | |
702 | /* |
703 | * DSCR has changed inside the DMA controller, so the previouly |
704 | * read value of CTRLA may refer to an already processed |
705 | * descriptor hence could be outdated. We need to update ctrla |
706 | * to match the current descriptor. |
707 | */ |
708 | dscr = new_dscr; |
709 | rmb(); /* ensure DSCR is read before CTRLA */ |
710 | ctrla = channel_readl(atchan, CTRLA); |
711 | } |
712 | if (unlikely(i == ATC_MAX_DSCR_TRIALS)) |
713 | return -ETIMEDOUT; |
714 | |
715 | /* For the first descriptor we can be more accurate. */ |
716 | if (desc->sg[0].lli->dscr == dscr) { |
717 | *residue = atc_calc_bytes_left(current_len: len, ctrla); |
718 | return 0; |
719 | } |
720 | len -= desc->sg[0].len; |
721 | |
722 | for (i = 1; i < desc->sglen; i++) { |
723 | if (desc->sg[i].lli && desc->sg[i].lli->dscr == dscr) |
724 | break; |
725 | len -= desc->sg[i].len; |
726 | } |
727 | |
728 | /* |
729 | * For the current LLI in the chain we can calculate the remaining bytes |
730 | * using the channel's CTRLA register. |
731 | */ |
732 | *residue = atc_calc_bytes_left(current_len: len, ctrla); |
733 | return 0; |
734 | |
735 | } |
736 | |
737 | /** |
738 | * atc_get_residue - get the number of bytes residue for a cookie. |
739 | * The residue is passed by address and updated on success. |
740 | * @chan: DMA channel |
741 | * @cookie: transaction identifier to check status of |
742 | * @residue: residue to be updated. |
743 | * |
744 | * Return: %0 on success, -errno otherwise. |
745 | */ |
746 | static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie, |
747 | u32 *residue) |
748 | { |
749 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
750 | struct virt_dma_desc *vd; |
751 | struct at_desc *desc = NULL; |
752 | u32 len, ctrla; |
753 | |
754 | vd = vchan_find_desc(&atchan->vc, cookie); |
755 | if (vd) |
756 | desc = to_atdma_desc(t: &vd->tx); |
757 | else if (atchan->desc && atchan->desc->vd.tx.cookie == cookie) |
758 | desc = atchan->desc; |
759 | |
760 | if (!desc) |
761 | return -EINVAL; |
762 | |
763 | if (desc->sg[0].lli->dscr) |
764 | /* hardware linked list transfer */ |
765 | return atc_get_llis_residue(atchan, desc, residue); |
766 | |
767 | /* single transfer */ |
768 | len = desc->total_len; |
769 | ctrla = channel_readl(atchan, CTRLA); |
770 | *residue = atc_calc_bytes_left(current_len: len, ctrla); |
771 | return 0; |
772 | } |
773 | |
774 | /** |
775 | * atc_handle_error - handle errors reported by DMA controller |
776 | * @atchan: channel where error occurs. |
777 | * @i: channel index |
778 | */ |
779 | static void atc_handle_error(struct at_dma_chan *atchan, unsigned int i) |
780 | { |
781 | struct at_desc *desc = atchan->desc; |
782 | |
783 | /* Disable channel on AHB error */ |
784 | dma_writel(atchan->atdma, CHDR, AT_DMA_RES(i) | atchan->mask); |
785 | |
786 | /* |
787 | * KERN_CRITICAL may seem harsh, but since this only happens |
788 | * when someone submits a bad physical address in a |
789 | * descriptor, we should consider ourselves lucky that the |
790 | * controller flagged an error instead of scribbling over |
791 | * random memory locations. |
792 | */ |
793 | dev_crit(chan2dev(&atchan->vc.chan), "Bad descriptor submitted for DMA!\n" ); |
794 | dev_crit(chan2dev(&atchan->vc.chan), "cookie: %d\n" , |
795 | desc->vd.tx.cookie); |
796 | for (i = 0; i < desc->sglen; i++) |
797 | atc_dump_lli(atchan, lli: desc->sg[i].lli); |
798 | } |
799 | |
800 | static void atdma_handle_chan_done(struct at_dma_chan *atchan, u32 pending, |
801 | unsigned int i) |
802 | { |
803 | struct at_desc *desc; |
804 | |
805 | spin_lock(lock: &atchan->vc.lock); |
806 | desc = atchan->desc; |
807 | |
808 | if (desc) { |
809 | if (pending & AT_DMA_ERR(i)) { |
810 | atc_handle_error(atchan, i); |
811 | /* Pretend the descriptor completed successfully */ |
812 | } |
813 | |
814 | if (atc_chan_is_cyclic(atchan)) { |
815 | vchan_cyclic_callback(vd: &desc->vd); |
816 | } else { |
817 | vchan_cookie_complete(vd: &desc->vd); |
818 | atchan->desc = NULL; |
819 | if (!(atc_chan_is_enabled(atchan))) |
820 | atc_dostart(atchan); |
821 | } |
822 | } |
823 | spin_unlock(lock: &atchan->vc.lock); |
824 | } |
825 | |
826 | static irqreturn_t at_dma_interrupt(int irq, void *dev_id) |
827 | { |
828 | struct at_dma *atdma = dev_id; |
829 | struct at_dma_chan *atchan; |
830 | int i; |
831 | u32 status, pending, imr; |
832 | int ret = IRQ_NONE; |
833 | |
834 | do { |
835 | imr = dma_readl(atdma, EBCIMR); |
836 | status = dma_readl(atdma, EBCISR); |
837 | pending = status & imr; |
838 | |
839 | if (!pending) |
840 | break; |
841 | |
842 | dev_vdbg(atdma->dma_device.dev, |
843 | "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n" , |
844 | status, imr, pending); |
845 | |
846 | for (i = 0; i < atdma->dma_device.chancnt; i++) { |
847 | atchan = &atdma->chan[i]; |
848 | if (!(pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i)))) |
849 | continue; |
850 | atdma_handle_chan_done(atchan, pending, i); |
851 | ret = IRQ_HANDLED; |
852 | } |
853 | |
854 | } while (pending); |
855 | |
856 | return ret; |
857 | } |
858 | |
859 | /*-- DMA Engine API --------------------------------------------------*/ |
860 | /** |
861 | * atc_prep_dma_interleaved - prepare memory to memory interleaved operation |
862 | * @chan: the channel to prepare operation on |
863 | * @xt: Interleaved transfer template |
864 | * @flags: tx descriptor status flags |
865 | */ |
866 | static struct dma_async_tx_descriptor * |
867 | atc_prep_dma_interleaved(struct dma_chan *chan, |
868 | struct dma_interleaved_template *xt, |
869 | unsigned long flags) |
870 | { |
871 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
872 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
873 | struct data_chunk *first; |
874 | struct atdma_sg *atdma_sg; |
875 | struct at_desc *desc; |
876 | struct at_lli *lli; |
877 | size_t xfer_count; |
878 | unsigned int dwidth; |
879 | u32 ctrla; |
880 | u32 ctrlb; |
881 | size_t len = 0; |
882 | int i; |
883 | |
884 | if (unlikely(!xt || xt->numf != 1 || !xt->frame_size)) |
885 | return NULL; |
886 | |
887 | first = xt->sgl; |
888 | |
889 | dev_info(chan2dev(chan), |
890 | "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n" , |
891 | __func__, &xt->src_start, &xt->dst_start, xt->numf, |
892 | xt->frame_size, flags); |
893 | |
894 | /* |
895 | * The controller can only "skip" X bytes every Y bytes, so we |
896 | * need to make sure we are given a template that fit that |
897 | * description, ie a template with chunks that always have the |
898 | * same size, with the same ICGs. |
899 | */ |
900 | for (i = 0; i < xt->frame_size; i++) { |
901 | struct data_chunk *chunk = xt->sgl + i; |
902 | |
903 | if ((chunk->size != xt->sgl->size) || |
904 | (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, chunk: first)) || |
905 | (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, chunk: first))) { |
906 | dev_err(chan2dev(chan), |
907 | "%s: the controller can transfer only identical chunks\n" , |
908 | __func__); |
909 | return NULL; |
910 | } |
911 | |
912 | len += chunk->size; |
913 | } |
914 | |
915 | dwidth = atc_get_xfer_width(src: xt->src_start, dst: xt->dst_start, len); |
916 | |
917 | xfer_count = len >> dwidth; |
918 | if (xfer_count > ATC_BTSIZE_MAX) { |
919 | dev_err(chan2dev(chan), "%s: buffer is too big\n" , __func__); |
920 | return NULL; |
921 | } |
922 | |
923 | ctrla = FIELD_PREP(ATC_SRC_WIDTH, dwidth) | |
924 | FIELD_PREP(ATC_DST_WIDTH, dwidth); |
925 | |
926 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | |
927 | FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | |
928 | FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | |
929 | ATC_SRC_PIP | ATC_DST_PIP | |
930 | FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); |
931 | |
932 | desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC); |
933 | if (!desc) |
934 | return NULL; |
935 | desc->sglen = 1; |
936 | |
937 | atdma_sg = desc->sg; |
938 | atdma_sg->lli = dma_pool_alloc(pool: atdma->lli_pool, GFP_NOWAIT, |
939 | handle: &atdma_sg->lli_phys); |
940 | if (!atdma_sg->lli) { |
941 | kfree(objp: desc); |
942 | return NULL; |
943 | } |
944 | lli = atdma_sg->lli; |
945 | |
946 | lli->saddr = xt->src_start; |
947 | lli->daddr = xt->dst_start; |
948 | lli->ctrla = ctrla | xfer_count; |
949 | lli->ctrlb = ctrlb; |
950 | |
951 | desc->boundary = first->size >> dwidth; |
952 | desc->dst_hole = (dmaengine_get_dst_icg(xt, chunk: first) >> dwidth) + 1; |
953 | desc->src_hole = (dmaengine_get_src_icg(xt, chunk: first) >> dwidth) + 1; |
954 | |
955 | atdma_sg->len = len; |
956 | desc->total_len = len; |
957 | |
958 | set_lli_eol(desc, i: 0); |
959 | return vchan_tx_prep(vc: &atchan->vc, vd: &desc->vd, tx_flags: flags); |
960 | } |
961 | |
962 | /** |
963 | * atc_prep_dma_memcpy - prepare a memcpy operation |
964 | * @chan: the channel to prepare operation on |
965 | * @dest: operation virtual destination address |
966 | * @src: operation virtual source address |
967 | * @len: operation length |
968 | * @flags: tx descriptor status flags |
969 | */ |
970 | static struct dma_async_tx_descriptor * |
971 | atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
972 | size_t len, unsigned long flags) |
973 | { |
974 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
975 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
976 | struct at_desc *desc = NULL; |
977 | size_t xfer_count; |
978 | size_t offset; |
979 | size_t sg_len; |
980 | unsigned int src_width; |
981 | unsigned int dst_width; |
982 | unsigned int i; |
983 | u32 ctrla; |
984 | u32 ctrlb; |
985 | |
986 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n" , |
987 | &dest, &src, len, flags); |
988 | |
989 | if (unlikely(!len)) { |
990 | dev_err(chan2dev(chan), "prep_dma_memcpy: length is zero!\n" ); |
991 | return NULL; |
992 | } |
993 | |
994 | sg_len = DIV_ROUND_UP(len, ATC_BTSIZE_MAX); |
995 | desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); |
996 | if (!desc) |
997 | return NULL; |
998 | desc->sglen = sg_len; |
999 | |
1000 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | |
1001 | FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | |
1002 | FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | |
1003 | FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); |
1004 | |
1005 | /* |
1006 | * We can be a lot more clever here, but this should take care |
1007 | * of the most common optimization. |
1008 | */ |
1009 | src_width = dst_width = atc_get_xfer_width(src, dst: dest, len); |
1010 | |
1011 | ctrla = FIELD_PREP(ATC_SRC_WIDTH, src_width) | |
1012 | FIELD_PREP(ATC_DST_WIDTH, dst_width); |
1013 | |
1014 | for (offset = 0, i = 0; offset < len; |
1015 | offset += xfer_count << src_width, i++) { |
1016 | struct atdma_sg *atdma_sg = &desc->sg[i]; |
1017 | struct at_lli *lli; |
1018 | |
1019 | atdma_sg->lli = dma_pool_alloc(pool: atdma->lli_pool, GFP_NOWAIT, |
1020 | handle: &atdma_sg->lli_phys); |
1021 | if (!atdma_sg->lli) |
1022 | goto err_desc_get; |
1023 | lli = atdma_sg->lli; |
1024 | |
1025 | xfer_count = min_t(size_t, (len - offset) >> src_width, |
1026 | ATC_BTSIZE_MAX); |
1027 | |
1028 | lli->saddr = src + offset; |
1029 | lli->daddr = dest + offset; |
1030 | lli->ctrla = ctrla | xfer_count; |
1031 | lli->ctrlb = ctrlb; |
1032 | |
1033 | desc->sg[i].len = xfer_count << src_width; |
1034 | |
1035 | atdma_lli_chain(desc, i); |
1036 | } |
1037 | |
1038 | desc->total_len = len; |
1039 | |
1040 | /* set end-of-link to the last link descriptor of list*/ |
1041 | set_lli_eol(desc, i: i - 1); |
1042 | |
1043 | return vchan_tx_prep(vc: &atchan->vc, vd: &desc->vd, tx_flags: flags); |
1044 | |
1045 | err_desc_get: |
1046 | atdma_desc_free(vd: &desc->vd); |
1047 | return NULL; |
1048 | } |
1049 | |
1050 | static int atdma_create_memset_lli(struct dma_chan *chan, |
1051 | struct atdma_sg *atdma_sg, |
1052 | dma_addr_t psrc, dma_addr_t pdst, size_t len) |
1053 | { |
1054 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1055 | struct at_lli *lli; |
1056 | size_t xfer_count; |
1057 | u32 ctrla = FIELD_PREP(ATC_SRC_WIDTH, 2) | FIELD_PREP(ATC_DST_WIDTH, 2); |
1058 | u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | |
1059 | FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_FIXED) | |
1060 | FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | |
1061 | FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); |
1062 | |
1063 | xfer_count = len >> 2; |
1064 | if (xfer_count > ATC_BTSIZE_MAX) { |
1065 | dev_err(chan2dev(chan), "%s: buffer is too big\n" , __func__); |
1066 | return -EINVAL; |
1067 | } |
1068 | |
1069 | atdma_sg->lli = dma_pool_alloc(pool: atdma->lli_pool, GFP_NOWAIT, |
1070 | handle: &atdma_sg->lli_phys); |
1071 | if (!atdma_sg->lli) |
1072 | return -ENOMEM; |
1073 | lli = atdma_sg->lli; |
1074 | |
1075 | lli->saddr = psrc; |
1076 | lli->daddr = pdst; |
1077 | lli->ctrla = ctrla | xfer_count; |
1078 | lli->ctrlb = ctrlb; |
1079 | |
1080 | atdma_sg->len = len; |
1081 | |
1082 | return 0; |
1083 | } |
1084 | |
1085 | /** |
1086 | * atc_prep_dma_memset - prepare a memcpy operation |
1087 | * @chan: the channel to prepare operation on |
1088 | * @dest: operation virtual destination address |
1089 | * @value: value to set memory buffer to |
1090 | * @len: operation length |
1091 | * @flags: tx descriptor status flags |
1092 | */ |
1093 | static struct dma_async_tx_descriptor * |
1094 | atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, |
1095 | size_t len, unsigned long flags) |
1096 | { |
1097 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1098 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1099 | struct at_desc *desc; |
1100 | void __iomem *vaddr; |
1101 | dma_addr_t paddr; |
1102 | char fill_pattern; |
1103 | int ret; |
1104 | |
1105 | dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n" , __func__, |
1106 | &dest, value, len, flags); |
1107 | |
1108 | if (unlikely(!len)) { |
1109 | dev_dbg(chan2dev(chan), "%s: length is zero!\n" , __func__); |
1110 | return NULL; |
1111 | } |
1112 | |
1113 | if (!is_dma_fill_aligned(dev: chan->device, off1: dest, off2: 0, len)) { |
1114 | dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n" , |
1115 | __func__); |
1116 | return NULL; |
1117 | } |
1118 | |
1119 | vaddr = dma_pool_alloc(pool: atdma->memset_pool, GFP_NOWAIT, handle: &paddr); |
1120 | if (!vaddr) { |
1121 | dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n" , |
1122 | __func__); |
1123 | return NULL; |
1124 | } |
1125 | |
1126 | /* Only the first byte of value is to be used according to dmaengine */ |
1127 | fill_pattern = (char)value; |
1128 | |
1129 | *(u32*)vaddr = (fill_pattern << 24) | |
1130 | (fill_pattern << 16) | |
1131 | (fill_pattern << 8) | |
1132 | fill_pattern; |
1133 | |
1134 | desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC); |
1135 | if (!desc) |
1136 | goto err_free_buffer; |
1137 | desc->sglen = 1; |
1138 | |
1139 | ret = atdma_create_memset_lli(chan, atdma_sg: desc->sg, psrc: paddr, pdst: dest, len); |
1140 | if (ret) |
1141 | goto err_free_desc; |
1142 | |
1143 | desc->memset_paddr = paddr; |
1144 | desc->memset_vaddr = vaddr; |
1145 | desc->memset_buffer = true; |
1146 | |
1147 | desc->total_len = len; |
1148 | |
1149 | /* set end-of-link on the descriptor */ |
1150 | set_lli_eol(desc, i: 0); |
1151 | |
1152 | return vchan_tx_prep(vc: &atchan->vc, vd: &desc->vd, tx_flags: flags); |
1153 | |
1154 | err_free_desc: |
1155 | kfree(objp: desc); |
1156 | err_free_buffer: |
1157 | dma_pool_free(pool: atdma->memset_pool, vaddr, addr: paddr); |
1158 | return NULL; |
1159 | } |
1160 | |
1161 | static struct dma_async_tx_descriptor * |
1162 | atc_prep_dma_memset_sg(struct dma_chan *chan, |
1163 | struct scatterlist *sgl, |
1164 | unsigned int sg_len, int value, |
1165 | unsigned long flags) |
1166 | { |
1167 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1168 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1169 | struct at_desc *desc; |
1170 | struct scatterlist *sg; |
1171 | void __iomem *vaddr; |
1172 | dma_addr_t paddr; |
1173 | size_t total_len = 0; |
1174 | int i; |
1175 | int ret; |
1176 | |
1177 | dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n" , __func__, |
1178 | value, sg_len, flags); |
1179 | |
1180 | if (unlikely(!sgl || !sg_len)) { |
1181 | dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n" , |
1182 | __func__); |
1183 | return NULL; |
1184 | } |
1185 | |
1186 | vaddr = dma_pool_alloc(pool: atdma->memset_pool, GFP_NOWAIT, handle: &paddr); |
1187 | if (!vaddr) { |
1188 | dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n" , |
1189 | __func__); |
1190 | return NULL; |
1191 | } |
1192 | *(u32*)vaddr = value; |
1193 | |
1194 | desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); |
1195 | if (!desc) |
1196 | goto err_free_dma_buf; |
1197 | desc->sglen = sg_len; |
1198 | |
1199 | for_each_sg(sgl, sg, sg_len, i) { |
1200 | dma_addr_t dest = sg_dma_address(sg); |
1201 | size_t len = sg_dma_len(sg); |
1202 | |
1203 | dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n" , |
1204 | __func__, &dest, len); |
1205 | |
1206 | if (!is_dma_fill_aligned(dev: chan->device, off1: dest, off2: 0, len)) { |
1207 | dev_err(chan2dev(chan), "%s: buffer is not aligned\n" , |
1208 | __func__); |
1209 | goto err_free_desc; |
1210 | } |
1211 | |
1212 | ret = atdma_create_memset_lli(chan, atdma_sg: &desc->sg[i], psrc: paddr, pdst: dest, |
1213 | len); |
1214 | if (ret) |
1215 | goto err_free_desc; |
1216 | |
1217 | atdma_lli_chain(desc, i); |
1218 | total_len += len; |
1219 | } |
1220 | |
1221 | desc->memset_paddr = paddr; |
1222 | desc->memset_vaddr = vaddr; |
1223 | desc->memset_buffer = true; |
1224 | |
1225 | desc->total_len = total_len; |
1226 | |
1227 | /* set end-of-link on the descriptor */ |
1228 | set_lli_eol(desc, i: i - 1); |
1229 | |
1230 | return vchan_tx_prep(vc: &atchan->vc, vd: &desc->vd, tx_flags: flags); |
1231 | |
1232 | err_free_desc: |
1233 | atdma_desc_free(vd: &desc->vd); |
1234 | err_free_dma_buf: |
1235 | dma_pool_free(pool: atdma->memset_pool, vaddr, addr: paddr); |
1236 | return NULL; |
1237 | } |
1238 | |
1239 | /** |
1240 | * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction |
1241 | * @chan: DMA channel |
1242 | * @sgl: scatterlist to transfer to/from |
1243 | * @sg_len: number of entries in @scatterlist |
1244 | * @direction: DMA direction |
1245 | * @flags: tx descriptor status flags |
1246 | * @context: transaction context (ignored) |
1247 | */ |
1248 | static struct dma_async_tx_descriptor * |
1249 | atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
1250 | unsigned int sg_len, enum dma_transfer_direction direction, |
1251 | unsigned long flags, void *context) |
1252 | { |
1253 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1254 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1255 | struct at_dma_slave *atslave = chan->private; |
1256 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; |
1257 | struct at_desc *desc; |
1258 | u32 ctrla; |
1259 | u32 ctrlb; |
1260 | dma_addr_t reg; |
1261 | unsigned int reg_width; |
1262 | unsigned int mem_width; |
1263 | unsigned int i; |
1264 | struct scatterlist *sg; |
1265 | size_t total_len = 0; |
1266 | |
1267 | dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n" , |
1268 | sg_len, |
1269 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE" , |
1270 | flags); |
1271 | |
1272 | if (unlikely(!atslave || !sg_len)) { |
1273 | dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n" ); |
1274 | return NULL; |
1275 | } |
1276 | |
1277 | desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); |
1278 | if (!desc) |
1279 | return NULL; |
1280 | desc->sglen = sg_len; |
1281 | |
1282 | ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) | |
1283 | FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst); |
1284 | ctrlb = ATC_IEN; |
1285 | |
1286 | switch (direction) { |
1287 | case DMA_MEM_TO_DEV: |
1288 | reg_width = convert_buswidth(addr_width: sconfig->dst_addr_width); |
1289 | ctrla |= FIELD_PREP(ATC_DST_WIDTH, reg_width); |
1290 | ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE, |
1291 | ATC_DST_ADDR_MODE_FIXED) | |
1292 | FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | |
1293 | FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) | |
1294 | FIELD_PREP(ATC_SIF, atchan->mem_if) | |
1295 | FIELD_PREP(ATC_DIF, atchan->per_if); |
1296 | reg = sconfig->dst_addr; |
1297 | for_each_sg(sgl, sg, sg_len, i) { |
1298 | struct atdma_sg *atdma_sg = &desc->sg[i]; |
1299 | struct at_lli *lli; |
1300 | u32 len; |
1301 | u32 mem; |
1302 | |
1303 | atdma_sg->lli = dma_pool_alloc(pool: atdma->lli_pool, |
1304 | GFP_NOWAIT, |
1305 | handle: &atdma_sg->lli_phys); |
1306 | if (!atdma_sg->lli) |
1307 | goto err_desc_get; |
1308 | lli = atdma_sg->lli; |
1309 | |
1310 | mem = sg_dma_address(sg); |
1311 | len = sg_dma_len(sg); |
1312 | if (unlikely(!len)) { |
1313 | dev_dbg(chan2dev(chan), |
1314 | "prep_slave_sg: sg(%d) data length is zero\n" , i); |
1315 | goto err; |
1316 | } |
1317 | mem_width = 2; |
1318 | if (unlikely(mem & 3 || len & 3)) |
1319 | mem_width = 0; |
1320 | |
1321 | lli->saddr = mem; |
1322 | lli->daddr = reg; |
1323 | lli->ctrla = ctrla | |
1324 | FIELD_PREP(ATC_SRC_WIDTH, mem_width) | |
1325 | len >> mem_width; |
1326 | lli->ctrlb = ctrlb; |
1327 | |
1328 | atdma_sg->len = len; |
1329 | total_len += len; |
1330 | |
1331 | desc->sg[i].len = len; |
1332 | atdma_lli_chain(desc, i); |
1333 | } |
1334 | break; |
1335 | case DMA_DEV_TO_MEM: |
1336 | reg_width = convert_buswidth(addr_width: sconfig->src_addr_width); |
1337 | ctrla |= FIELD_PREP(ATC_SRC_WIDTH, reg_width); |
1338 | ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | |
1339 | FIELD_PREP(ATC_SRC_ADDR_MODE, |
1340 | ATC_SRC_ADDR_MODE_FIXED) | |
1341 | FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) | |
1342 | FIELD_PREP(ATC_SIF, atchan->per_if) | |
1343 | FIELD_PREP(ATC_DIF, atchan->mem_if); |
1344 | |
1345 | reg = sconfig->src_addr; |
1346 | for_each_sg(sgl, sg, sg_len, i) { |
1347 | struct atdma_sg *atdma_sg = &desc->sg[i]; |
1348 | struct at_lli *lli; |
1349 | u32 len; |
1350 | u32 mem; |
1351 | |
1352 | atdma_sg->lli = dma_pool_alloc(pool: atdma->lli_pool, |
1353 | GFP_NOWAIT, |
1354 | handle: &atdma_sg->lli_phys); |
1355 | if (!atdma_sg->lli) |
1356 | goto err_desc_get; |
1357 | lli = atdma_sg->lli; |
1358 | |
1359 | mem = sg_dma_address(sg); |
1360 | len = sg_dma_len(sg); |
1361 | if (unlikely(!len)) { |
1362 | dev_dbg(chan2dev(chan), |
1363 | "prep_slave_sg: sg(%d) data length is zero\n" , i); |
1364 | goto err; |
1365 | } |
1366 | mem_width = 2; |
1367 | if (unlikely(mem & 3 || len & 3)) |
1368 | mem_width = 0; |
1369 | |
1370 | lli->saddr = reg; |
1371 | lli->daddr = mem; |
1372 | lli->ctrla = ctrla | |
1373 | FIELD_PREP(ATC_DST_WIDTH, mem_width) | |
1374 | len >> reg_width; |
1375 | lli->ctrlb = ctrlb; |
1376 | |
1377 | desc->sg[i].len = len; |
1378 | total_len += len; |
1379 | |
1380 | atdma_lli_chain(desc, i); |
1381 | } |
1382 | break; |
1383 | default: |
1384 | return NULL; |
1385 | } |
1386 | |
1387 | /* set end-of-link to the last link descriptor of list*/ |
1388 | set_lli_eol(desc, i: i - 1); |
1389 | |
1390 | desc->total_len = total_len; |
1391 | |
1392 | return vchan_tx_prep(vc: &atchan->vc, vd: &desc->vd, tx_flags: flags); |
1393 | |
1394 | err_desc_get: |
1395 | dev_err(chan2dev(chan), "not enough descriptors available\n" ); |
1396 | err: |
1397 | atdma_desc_free(vd: &desc->vd); |
1398 | return NULL; |
1399 | } |
1400 | |
1401 | /* |
1402 | * atc_dma_cyclic_check_values |
1403 | * Check for too big/unaligned periods and unaligned DMA buffer |
1404 | */ |
1405 | static int |
1406 | atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, |
1407 | size_t period_len) |
1408 | { |
1409 | if (period_len > (ATC_BTSIZE_MAX << reg_width)) |
1410 | goto err_out; |
1411 | if (unlikely(period_len & ((1 << reg_width) - 1))) |
1412 | goto err_out; |
1413 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) |
1414 | goto err_out; |
1415 | |
1416 | return 0; |
1417 | |
1418 | err_out: |
1419 | return -EINVAL; |
1420 | } |
1421 | |
1422 | /* |
1423 | * atc_dma_cyclic_fill_desc - Fill one period descriptor |
1424 | */ |
1425 | static int |
1426 | atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, |
1427 | unsigned int i, dma_addr_t buf_addr, |
1428 | unsigned int reg_width, size_t period_len, |
1429 | enum dma_transfer_direction direction) |
1430 | { |
1431 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1432 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1433 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; |
1434 | struct atdma_sg *atdma_sg = &desc->sg[i]; |
1435 | struct at_lli *lli; |
1436 | |
1437 | atdma_sg->lli = dma_pool_alloc(pool: atdma->lli_pool, GFP_ATOMIC, |
1438 | handle: &atdma_sg->lli_phys); |
1439 | if (!atdma_sg->lli) |
1440 | return -ENOMEM; |
1441 | lli = atdma_sg->lli; |
1442 | |
1443 | switch (direction) { |
1444 | case DMA_MEM_TO_DEV: |
1445 | lli->saddr = buf_addr + (period_len * i); |
1446 | lli->daddr = sconfig->dst_addr; |
1447 | lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE, |
1448 | ATC_DST_ADDR_MODE_FIXED) | |
1449 | FIELD_PREP(ATC_SRC_ADDR_MODE, |
1450 | ATC_SRC_ADDR_MODE_INCR) | |
1451 | FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) | |
1452 | FIELD_PREP(ATC_SIF, atchan->mem_if) | |
1453 | FIELD_PREP(ATC_DIF, atchan->per_if); |
1454 | |
1455 | break; |
1456 | |
1457 | case DMA_DEV_TO_MEM: |
1458 | lli->saddr = sconfig->src_addr; |
1459 | lli->daddr = buf_addr + (period_len * i); |
1460 | lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE, |
1461 | ATC_DST_ADDR_MODE_INCR) | |
1462 | FIELD_PREP(ATC_SRC_ADDR_MODE, |
1463 | ATC_SRC_ADDR_MODE_FIXED) | |
1464 | FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) | |
1465 | FIELD_PREP(ATC_SIF, atchan->per_if) | |
1466 | FIELD_PREP(ATC_DIF, atchan->mem_if); |
1467 | break; |
1468 | |
1469 | default: |
1470 | return -EINVAL; |
1471 | } |
1472 | |
1473 | lli->ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) | |
1474 | FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst) | |
1475 | FIELD_PREP(ATC_DST_WIDTH, reg_width) | |
1476 | FIELD_PREP(ATC_SRC_WIDTH, reg_width) | |
1477 | period_len >> reg_width; |
1478 | desc->sg[i].len = period_len; |
1479 | |
1480 | return 0; |
1481 | } |
1482 | |
1483 | /** |
1484 | * atc_prep_dma_cyclic - prepare the cyclic DMA transfer |
1485 | * @chan: the DMA channel to prepare |
1486 | * @buf_addr: physical DMA address where the buffer starts |
1487 | * @buf_len: total number of bytes for the entire buffer |
1488 | * @period_len: number of bytes for each period |
1489 | * @direction: transfer direction, to or from device |
1490 | * @flags: tx descriptor status flags |
1491 | */ |
1492 | static struct dma_async_tx_descriptor * |
1493 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
1494 | size_t period_len, enum dma_transfer_direction direction, |
1495 | unsigned long flags) |
1496 | { |
1497 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1498 | struct at_dma_slave *atslave = chan->private; |
1499 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; |
1500 | struct at_desc *desc; |
1501 | unsigned long was_cyclic; |
1502 | unsigned int reg_width; |
1503 | unsigned int periods = buf_len / period_len; |
1504 | unsigned int i; |
1505 | |
1506 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n" , |
1507 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE" , |
1508 | &buf_addr, |
1509 | periods, buf_len, period_len); |
1510 | |
1511 | if (unlikely(!atslave || !buf_len || !period_len)) { |
1512 | dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n" ); |
1513 | return NULL; |
1514 | } |
1515 | |
1516 | was_cyclic = test_and_set_bit(nr: ATC_IS_CYCLIC, addr: &atchan->status); |
1517 | if (was_cyclic) { |
1518 | dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n" ); |
1519 | return NULL; |
1520 | } |
1521 | |
1522 | if (unlikely(!is_slave_direction(direction))) |
1523 | goto err_out; |
1524 | |
1525 | if (direction == DMA_MEM_TO_DEV) |
1526 | reg_width = convert_buswidth(addr_width: sconfig->dst_addr_width); |
1527 | else |
1528 | reg_width = convert_buswidth(addr_width: sconfig->src_addr_width); |
1529 | |
1530 | /* Check for too big/unaligned periods and unaligned DMA buffer */ |
1531 | if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) |
1532 | goto err_out; |
1533 | |
1534 | desc = kzalloc(struct_size(desc, sg, periods), GFP_ATOMIC); |
1535 | if (!desc) |
1536 | goto err_out; |
1537 | desc->sglen = periods; |
1538 | |
1539 | /* build cyclic linked list */ |
1540 | for (i = 0; i < periods; i++) { |
1541 | if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, |
1542 | reg_width, period_len, direction)) |
1543 | goto err_fill_desc; |
1544 | atdma_lli_chain(desc, i); |
1545 | } |
1546 | desc->total_len = buf_len; |
1547 | /* lets make a cyclic list */ |
1548 | desc->sg[i - 1].lli->dscr = desc->sg[0].lli_phys; |
1549 | |
1550 | return vchan_tx_prep(vc: &atchan->vc, vd: &desc->vd, tx_flags: flags); |
1551 | |
1552 | err_fill_desc: |
1553 | atdma_desc_free(vd: &desc->vd); |
1554 | err_out: |
1555 | clear_bit(nr: ATC_IS_CYCLIC, addr: &atchan->status); |
1556 | return NULL; |
1557 | } |
1558 | |
1559 | static int atc_config(struct dma_chan *chan, |
1560 | struct dma_slave_config *sconfig) |
1561 | { |
1562 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1563 | |
1564 | dev_vdbg(chan2dev(chan), "%s\n" , __func__); |
1565 | |
1566 | /* Check if it is chan is configured for slave transfers */ |
1567 | if (!chan->private) |
1568 | return -EINVAL; |
1569 | |
1570 | memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); |
1571 | |
1572 | convert_burst(maxburst: &atchan->dma_sconfig.src_maxburst); |
1573 | convert_burst(maxburst: &atchan->dma_sconfig.dst_maxburst); |
1574 | |
1575 | return 0; |
1576 | } |
1577 | |
1578 | static int atc_pause(struct dma_chan *chan) |
1579 | { |
1580 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1581 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1582 | int chan_id = atchan->vc.chan.chan_id; |
1583 | unsigned long flags; |
1584 | |
1585 | dev_vdbg(chan2dev(chan), "%s\n" , __func__); |
1586 | |
1587 | spin_lock_irqsave(&atchan->vc.lock, flags); |
1588 | |
1589 | dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); |
1590 | set_bit(nr: ATC_IS_PAUSED, addr: &atchan->status); |
1591 | |
1592 | spin_unlock_irqrestore(lock: &atchan->vc.lock, flags); |
1593 | |
1594 | return 0; |
1595 | } |
1596 | |
1597 | static int atc_resume(struct dma_chan *chan) |
1598 | { |
1599 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1600 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1601 | int chan_id = atchan->vc.chan.chan_id; |
1602 | unsigned long flags; |
1603 | |
1604 | dev_vdbg(chan2dev(chan), "%s\n" , __func__); |
1605 | |
1606 | if (!atc_chan_is_paused(atchan)) |
1607 | return 0; |
1608 | |
1609 | spin_lock_irqsave(&atchan->vc.lock, flags); |
1610 | |
1611 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); |
1612 | clear_bit(nr: ATC_IS_PAUSED, addr: &atchan->status); |
1613 | |
1614 | spin_unlock_irqrestore(lock: &atchan->vc.lock, flags); |
1615 | |
1616 | return 0; |
1617 | } |
1618 | |
1619 | static int atc_terminate_all(struct dma_chan *chan) |
1620 | { |
1621 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1622 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1623 | int chan_id = atchan->vc.chan.chan_id; |
1624 | unsigned long flags; |
1625 | |
1626 | LIST_HEAD(list); |
1627 | |
1628 | dev_vdbg(chan2dev(chan), "%s\n" , __func__); |
1629 | |
1630 | /* |
1631 | * This is only called when something went wrong elsewhere, so |
1632 | * we don't really care about the data. Just disable the |
1633 | * channel. We still have to poll the channel enable bit due |
1634 | * to AHB/HSB limitations. |
1635 | */ |
1636 | spin_lock_irqsave(&atchan->vc.lock, flags); |
1637 | |
1638 | /* disabling channel: must also remove suspend state */ |
1639 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); |
1640 | |
1641 | /* confirm that this channel is disabled */ |
1642 | while (dma_readl(atdma, CHSR) & atchan->mask) |
1643 | cpu_relax(); |
1644 | |
1645 | if (atchan->desc) { |
1646 | vchan_terminate_vdesc(vd: &atchan->desc->vd); |
1647 | atchan->desc = NULL; |
1648 | } |
1649 | |
1650 | vchan_get_all_descriptors(vc: &atchan->vc, head: &list); |
1651 | |
1652 | clear_bit(nr: ATC_IS_PAUSED, addr: &atchan->status); |
1653 | /* if channel dedicated to cyclic operations, free it */ |
1654 | clear_bit(nr: ATC_IS_CYCLIC, addr: &atchan->status); |
1655 | |
1656 | spin_unlock_irqrestore(lock: &atchan->vc.lock, flags); |
1657 | |
1658 | vchan_dma_desc_free_list(vc: &atchan->vc, head: &list); |
1659 | |
1660 | return 0; |
1661 | } |
1662 | |
1663 | /** |
1664 | * atc_tx_status - poll for transaction completion |
1665 | * @chan: DMA channel |
1666 | * @cookie: transaction identifier to check status of |
1667 | * @txstate: if not %NULL updated with transaction state |
1668 | * |
1669 | * If @txstate is passed in, upon return it reflect the driver |
1670 | * internal state and can be used with dma_async_is_complete() to check |
1671 | * the status of multiple cookies without re-checking hardware state. |
1672 | */ |
1673 | static enum dma_status |
1674 | atc_tx_status(struct dma_chan *chan, |
1675 | dma_cookie_t cookie, |
1676 | struct dma_tx_state *txstate) |
1677 | { |
1678 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1679 | unsigned long flags; |
1680 | enum dma_status dma_status; |
1681 | u32 residue; |
1682 | int ret; |
1683 | |
1684 | dma_status = dma_cookie_status(chan, cookie, state: txstate); |
1685 | if (dma_status == DMA_COMPLETE || !txstate) |
1686 | return dma_status; |
1687 | |
1688 | spin_lock_irqsave(&atchan->vc.lock, flags); |
1689 | /* Get number of bytes left in the active transactions */ |
1690 | ret = atc_get_residue(chan, cookie, residue: &residue); |
1691 | spin_unlock_irqrestore(lock: &atchan->vc.lock, flags); |
1692 | |
1693 | if (unlikely(ret < 0)) { |
1694 | dev_vdbg(chan2dev(chan), "get residual bytes error\n" ); |
1695 | return DMA_ERROR; |
1696 | } else { |
1697 | dma_set_residue(state: txstate, residue); |
1698 | } |
1699 | |
1700 | dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %u\n" , |
1701 | dma_status, cookie, residue); |
1702 | |
1703 | return dma_status; |
1704 | } |
1705 | |
1706 | static void atc_issue_pending(struct dma_chan *chan) |
1707 | { |
1708 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1709 | unsigned long flags; |
1710 | |
1711 | spin_lock_irqsave(&atchan->vc.lock, flags); |
1712 | if (vchan_issue_pending(vc: &atchan->vc) && !atchan->desc) { |
1713 | if (!(atc_chan_is_enabled(atchan))) |
1714 | atc_dostart(atchan); |
1715 | } |
1716 | spin_unlock_irqrestore(lock: &atchan->vc.lock, flags); |
1717 | } |
1718 | |
1719 | /** |
1720 | * atc_alloc_chan_resources - allocate resources for DMA channel |
1721 | * @chan: allocate descriptor resources for this channel |
1722 | * |
1723 | * Return: the number of allocated descriptors |
1724 | */ |
1725 | static int atc_alloc_chan_resources(struct dma_chan *chan) |
1726 | { |
1727 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1728 | struct at_dma *atdma = to_at_dma(ddev: chan->device); |
1729 | struct at_dma_slave *atslave; |
1730 | u32 cfg; |
1731 | |
1732 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n" ); |
1733 | |
1734 | /* ASSERT: channel is idle */ |
1735 | if (atc_chan_is_enabled(atchan)) { |
1736 | dev_dbg(chan2dev(chan), "DMA channel not idle ?\n" ); |
1737 | return -EIO; |
1738 | } |
1739 | |
1740 | cfg = ATC_DEFAULT_CFG; |
1741 | |
1742 | atslave = chan->private; |
1743 | if (atslave) { |
1744 | /* |
1745 | * We need controller-specific data to set up slave |
1746 | * transfers. |
1747 | */ |
1748 | BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_device.dev); |
1749 | |
1750 | /* if cfg configuration specified take it instead of default */ |
1751 | if (atslave->cfg) |
1752 | cfg = atslave->cfg; |
1753 | } |
1754 | |
1755 | /* channel parameters */ |
1756 | channel_writel(atchan, CFG, cfg); |
1757 | |
1758 | return 0; |
1759 | } |
1760 | |
1761 | /** |
1762 | * atc_free_chan_resources - free all channel resources |
1763 | * @chan: DMA channel |
1764 | */ |
1765 | static void atc_free_chan_resources(struct dma_chan *chan) |
1766 | { |
1767 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1768 | |
1769 | BUG_ON(atc_chan_is_enabled(atchan)); |
1770 | |
1771 | vchan_free_chan_resources(vc: to_virt_chan(chan)); |
1772 | atchan->status = 0; |
1773 | |
1774 | /* |
1775 | * Free atslave allocated in at_dma_xlate() |
1776 | */ |
1777 | kfree(objp: chan->private); |
1778 | chan->private = NULL; |
1779 | |
1780 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n" ); |
1781 | } |
1782 | |
1783 | #ifdef CONFIG_OF |
1784 | static bool at_dma_filter(struct dma_chan *chan, void *slave) |
1785 | { |
1786 | struct at_dma_slave *atslave = slave; |
1787 | |
1788 | if (atslave->dma_dev == chan->device->dev) { |
1789 | chan->private = atslave; |
1790 | return true; |
1791 | } else { |
1792 | return false; |
1793 | } |
1794 | } |
1795 | |
1796 | static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, |
1797 | struct of_dma *of_dma) |
1798 | { |
1799 | struct dma_chan *chan; |
1800 | struct at_dma_chan *atchan; |
1801 | struct at_dma_slave *atslave; |
1802 | dma_cap_mask_t mask; |
1803 | unsigned int per_id; |
1804 | struct platform_device *dmac_pdev; |
1805 | |
1806 | if (dma_spec->args_count != 2) |
1807 | return NULL; |
1808 | |
1809 | dmac_pdev = of_find_device_by_node(np: dma_spec->np); |
1810 | if (!dmac_pdev) |
1811 | return NULL; |
1812 | |
1813 | dma_cap_zero(mask); |
1814 | dma_cap_set(DMA_SLAVE, mask); |
1815 | |
1816 | atslave = kmalloc(size: sizeof(*atslave), GFP_KERNEL); |
1817 | if (!atslave) { |
1818 | put_device(dev: &dmac_pdev->dev); |
1819 | return NULL; |
1820 | } |
1821 | |
1822 | atslave->cfg = ATC_DST_H2SEL | ATC_SRC_H2SEL; |
1823 | /* |
1824 | * We can fill both SRC_PER and DST_PER, one of these fields will be |
1825 | * ignored depending on DMA transfer direction. |
1826 | */ |
1827 | per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; |
1828 | atslave->cfg |= ATC_DST_PER_ID(per_id) | ATC_SRC_PER_ID(per_id); |
1829 | /* |
1830 | * We have to translate the value we get from the device tree since |
1831 | * the half FIFO configuration value had to be 0 to keep backward |
1832 | * compatibility. |
1833 | */ |
1834 | switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { |
1835 | case AT91_DMA_CFG_FIFOCFG_ALAP: |
1836 | atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, |
1837 | ATC_FIFOCFG_LARGESTBURST); |
1838 | break; |
1839 | case AT91_DMA_CFG_FIFOCFG_ASAP: |
1840 | atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, |
1841 | ATC_FIFOCFG_ENOUGHSPACE); |
1842 | break; |
1843 | case AT91_DMA_CFG_FIFOCFG_HALF: |
1844 | default: |
1845 | atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO); |
1846 | } |
1847 | atslave->dma_dev = &dmac_pdev->dev; |
1848 | |
1849 | chan = dma_request_channel(mask, at_dma_filter, atslave); |
1850 | if (!chan) { |
1851 | put_device(dev: &dmac_pdev->dev); |
1852 | kfree(objp: atslave); |
1853 | return NULL; |
1854 | } |
1855 | |
1856 | atchan = to_at_dma_chan(chan); |
1857 | atchan->per_if = dma_spec->args[0] & 0xff; |
1858 | atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; |
1859 | |
1860 | return chan; |
1861 | } |
1862 | #else |
1863 | static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, |
1864 | struct of_dma *of_dma) |
1865 | { |
1866 | return NULL; |
1867 | } |
1868 | #endif |
1869 | |
1870 | /*-- Module Management -----------------------------------------------*/ |
1871 | |
1872 | /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */ |
1873 | static struct at_dma_platform_data at91sam9rl_config = { |
1874 | .nr_channels = 2, |
1875 | }; |
1876 | static struct at_dma_platform_data at91sam9g45_config = { |
1877 | .nr_channels = 8, |
1878 | }; |
1879 | |
1880 | #if defined(CONFIG_OF) |
1881 | static const struct of_device_id atmel_dma_dt_ids[] = { |
1882 | { |
1883 | .compatible = "atmel,at91sam9rl-dma" , |
1884 | .data = &at91sam9rl_config, |
1885 | }, { |
1886 | .compatible = "atmel,at91sam9g45-dma" , |
1887 | .data = &at91sam9g45_config, |
1888 | }, { |
1889 | /* sentinel */ |
1890 | } |
1891 | }; |
1892 | |
1893 | MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); |
1894 | #endif |
1895 | |
1896 | static const struct platform_device_id atdma_devtypes[] = { |
1897 | { |
1898 | .name = "at91sam9rl_dma" , |
1899 | .driver_data = (unsigned long) &at91sam9rl_config, |
1900 | }, { |
1901 | .name = "at91sam9g45_dma" , |
1902 | .driver_data = (unsigned long) &at91sam9g45_config, |
1903 | }, { |
1904 | /* sentinel */ |
1905 | } |
1906 | }; |
1907 | |
1908 | static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( |
1909 | struct platform_device *pdev) |
1910 | { |
1911 | if (pdev->dev.of_node) { |
1912 | const struct of_device_id *match; |
1913 | match = of_match_node(matches: atmel_dma_dt_ids, node: pdev->dev.of_node); |
1914 | if (match == NULL) |
1915 | return NULL; |
1916 | return match->data; |
1917 | } |
1918 | return (struct at_dma_platform_data *) |
1919 | platform_get_device_id(pdev)->driver_data; |
1920 | } |
1921 | |
1922 | /** |
1923 | * at_dma_off - disable DMA controller |
1924 | * @atdma: the Atmel HDAMC device |
1925 | */ |
1926 | static void at_dma_off(struct at_dma *atdma) |
1927 | { |
1928 | dma_writel(atdma, EN, 0); |
1929 | |
1930 | /* disable all interrupts */ |
1931 | dma_writel(atdma, EBCIDR, -1L); |
1932 | |
1933 | /* confirm that all channels are disabled */ |
1934 | while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) |
1935 | cpu_relax(); |
1936 | } |
1937 | |
1938 | static int __init at_dma_probe(struct platform_device *pdev) |
1939 | { |
1940 | struct at_dma *atdma; |
1941 | int irq; |
1942 | int err; |
1943 | int i; |
1944 | const struct at_dma_platform_data *plat_dat; |
1945 | |
1946 | /* setup platform data for each SoC */ |
1947 | dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); |
1948 | dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); |
1949 | dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); |
1950 | dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); |
1951 | dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask); |
1952 | dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); |
1953 | dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); |
1954 | |
1955 | /* get DMA parameters from controller type */ |
1956 | plat_dat = at_dma_get_driver_data(pdev); |
1957 | if (!plat_dat) |
1958 | return -ENODEV; |
1959 | |
1960 | atdma = devm_kzalloc(dev: &pdev->dev, |
1961 | struct_size(atdma, chan, plat_dat->nr_channels), |
1962 | GFP_KERNEL); |
1963 | if (!atdma) |
1964 | return -ENOMEM; |
1965 | |
1966 | atdma->regs = devm_platform_ioremap_resource(pdev, index: 0); |
1967 | if (IS_ERR(ptr: atdma->regs)) |
1968 | return PTR_ERR(ptr: atdma->regs); |
1969 | |
1970 | irq = platform_get_irq(pdev, 0); |
1971 | if (irq < 0) |
1972 | return irq; |
1973 | |
1974 | /* discover transaction capabilities */ |
1975 | atdma->dma_device.cap_mask = plat_dat->cap_mask; |
1976 | atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; |
1977 | |
1978 | atdma->clk = devm_clk_get(dev: &pdev->dev, id: "dma_clk" ); |
1979 | if (IS_ERR(ptr: atdma->clk)) |
1980 | return PTR_ERR(ptr: atdma->clk); |
1981 | |
1982 | err = clk_prepare_enable(clk: atdma->clk); |
1983 | if (err) |
1984 | return err; |
1985 | |
1986 | /* force dma off, just in case */ |
1987 | at_dma_off(atdma); |
1988 | |
1989 | err = request_irq(irq, handler: at_dma_interrupt, flags: 0, name: "at_hdmac" , dev: atdma); |
1990 | if (err) |
1991 | goto err_irq; |
1992 | |
1993 | platform_set_drvdata(pdev, data: atdma); |
1994 | |
1995 | /* create a pool of consistent memory blocks for hardware descriptors */ |
1996 | atdma->lli_pool = dma_pool_create(name: "at_hdmac_lli_pool" , |
1997 | dev: &pdev->dev, size: sizeof(struct at_lli), |
1998 | align: 4 /* word alignment */, allocation: 0); |
1999 | if (!atdma->lli_pool) { |
2000 | dev_err(&pdev->dev, "Unable to allocate DMA LLI descriptor pool\n" ); |
2001 | err = -ENOMEM; |
2002 | goto err_desc_pool_create; |
2003 | } |
2004 | |
2005 | /* create a pool of consistent memory blocks for memset blocks */ |
2006 | atdma->memset_pool = dma_pool_create(name: "at_hdmac_memset_pool" , |
2007 | dev: &pdev->dev, size: sizeof(int), align: 4, allocation: 0); |
2008 | if (!atdma->memset_pool) { |
2009 | dev_err(&pdev->dev, "No memory for memset dma pool\n" ); |
2010 | err = -ENOMEM; |
2011 | goto err_memset_pool_create; |
2012 | } |
2013 | |
2014 | /* clear any pending interrupt */ |
2015 | while (dma_readl(atdma, EBCISR)) |
2016 | cpu_relax(); |
2017 | |
2018 | /* initialize channels related values */ |
2019 | INIT_LIST_HEAD(list: &atdma->dma_device.channels); |
2020 | for (i = 0; i < plat_dat->nr_channels; i++) { |
2021 | struct at_dma_chan *atchan = &atdma->chan[i]; |
2022 | |
2023 | atchan->mem_if = AT_DMA_MEM_IF; |
2024 | atchan->per_if = AT_DMA_PER_IF; |
2025 | |
2026 | atchan->ch_regs = atdma->regs + ch_regs(i); |
2027 | atchan->mask = 1 << i; |
2028 | |
2029 | atchan->atdma = atdma; |
2030 | atchan->vc.desc_free = atdma_desc_free; |
2031 | vchan_init(vc: &atchan->vc, dmadev: &atdma->dma_device); |
2032 | atc_enable_chan_irq(atdma, chan_id: i); |
2033 | } |
2034 | |
2035 | /* set base routines */ |
2036 | atdma->dma_device.device_alloc_chan_resources = atc_alloc_chan_resources; |
2037 | atdma->dma_device.device_free_chan_resources = atc_free_chan_resources; |
2038 | atdma->dma_device.device_tx_status = atc_tx_status; |
2039 | atdma->dma_device.device_issue_pending = atc_issue_pending; |
2040 | atdma->dma_device.dev = &pdev->dev; |
2041 | |
2042 | /* set prep routines based on capability */ |
2043 | if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_device.cap_mask)) |
2044 | atdma->dma_device.device_prep_interleaved_dma = atc_prep_dma_interleaved; |
2045 | |
2046 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask)) |
2047 | atdma->dma_device.device_prep_dma_memcpy = atc_prep_dma_memcpy; |
2048 | |
2049 | if (dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask)) { |
2050 | atdma->dma_device.device_prep_dma_memset = atc_prep_dma_memset; |
2051 | atdma->dma_device.device_prep_dma_memset_sg = atc_prep_dma_memset_sg; |
2052 | atdma->dma_device.fill_align = DMAENGINE_ALIGN_4_BYTES; |
2053 | } |
2054 | |
2055 | if (dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask)) { |
2056 | atdma->dma_device.device_prep_slave_sg = atc_prep_slave_sg; |
2057 | /* controller can do slave DMA: can trigger cyclic transfers */ |
2058 | dma_cap_set(DMA_CYCLIC, atdma->dma_device.cap_mask); |
2059 | atdma->dma_device.device_prep_dma_cyclic = atc_prep_dma_cyclic; |
2060 | atdma->dma_device.device_config = atc_config; |
2061 | atdma->dma_device.device_pause = atc_pause; |
2062 | atdma->dma_device.device_resume = atc_resume; |
2063 | atdma->dma_device.device_terminate_all = atc_terminate_all; |
2064 | atdma->dma_device.src_addr_widths = ATC_DMA_BUSWIDTHS; |
2065 | atdma->dma_device.dst_addr_widths = ATC_DMA_BUSWIDTHS; |
2066 | atdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
2067 | atdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
2068 | } |
2069 | |
2070 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
2071 | |
2072 | dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n" , |
2073 | dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask) ? "cpy " : "" , |
2074 | dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask) ? "set " : "" , |
2075 | dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask) ? "slave " : "" , |
2076 | plat_dat->nr_channels); |
2077 | |
2078 | err = dma_async_device_register(device: &atdma->dma_device); |
2079 | if (err) { |
2080 | dev_err(&pdev->dev, "Unable to register: %d.\n" , err); |
2081 | goto err_dma_async_device_register; |
2082 | } |
2083 | |
2084 | /* |
2085 | * Do not return an error if the dmac node is not present in order to |
2086 | * not break the existing way of requesting channel with |
2087 | * dma_request_channel(). |
2088 | */ |
2089 | if (pdev->dev.of_node) { |
2090 | err = of_dma_controller_register(np: pdev->dev.of_node, |
2091 | of_dma_xlate: at_dma_xlate, data: atdma); |
2092 | if (err) { |
2093 | dev_err(&pdev->dev, "could not register of_dma_controller\n" ); |
2094 | goto err_of_dma_controller_register; |
2095 | } |
2096 | } |
2097 | |
2098 | return 0; |
2099 | |
2100 | err_of_dma_controller_register: |
2101 | dma_async_device_unregister(device: &atdma->dma_device); |
2102 | err_dma_async_device_register: |
2103 | dma_pool_destroy(pool: atdma->memset_pool); |
2104 | err_memset_pool_create: |
2105 | dma_pool_destroy(pool: atdma->lli_pool); |
2106 | err_desc_pool_create: |
2107 | free_irq(platform_get_irq(pdev, 0), atdma); |
2108 | err_irq: |
2109 | clk_disable_unprepare(clk: atdma->clk); |
2110 | return err; |
2111 | } |
2112 | |
2113 | static void at_dma_remove(struct platform_device *pdev) |
2114 | { |
2115 | struct at_dma *atdma = platform_get_drvdata(pdev); |
2116 | struct dma_chan *chan, *_chan; |
2117 | |
2118 | at_dma_off(atdma); |
2119 | if (pdev->dev.of_node) |
2120 | of_dma_controller_free(np: pdev->dev.of_node); |
2121 | dma_async_device_unregister(device: &atdma->dma_device); |
2122 | |
2123 | dma_pool_destroy(pool: atdma->memset_pool); |
2124 | dma_pool_destroy(pool: atdma->lli_pool); |
2125 | free_irq(platform_get_irq(pdev, 0), atdma); |
2126 | |
2127 | list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, |
2128 | device_node) { |
2129 | /* Disable interrupts */ |
2130 | atc_disable_chan_irq(atdma, chan_id: chan->chan_id); |
2131 | list_del(entry: &chan->device_node); |
2132 | } |
2133 | |
2134 | clk_disable_unprepare(clk: atdma->clk); |
2135 | } |
2136 | |
2137 | static void at_dma_shutdown(struct platform_device *pdev) |
2138 | { |
2139 | struct at_dma *atdma = platform_get_drvdata(pdev); |
2140 | |
2141 | at_dma_off(atdma: platform_get_drvdata(pdev)); |
2142 | clk_disable_unprepare(clk: atdma->clk); |
2143 | } |
2144 | |
2145 | static int at_dma_prepare(struct device *dev) |
2146 | { |
2147 | struct at_dma *atdma = dev_get_drvdata(dev); |
2148 | struct dma_chan *chan, *_chan; |
2149 | |
2150 | list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, |
2151 | device_node) { |
2152 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
2153 | /* wait for transaction completion (except in cyclic case) */ |
2154 | if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) |
2155 | return -EAGAIN; |
2156 | } |
2157 | return 0; |
2158 | } |
2159 | |
2160 | static void atc_suspend_cyclic(struct at_dma_chan *atchan) |
2161 | { |
2162 | struct dma_chan *chan = &atchan->vc.chan; |
2163 | |
2164 | /* Channel should be paused by user |
2165 | * do it anyway even if it is not done already */ |
2166 | if (!atc_chan_is_paused(atchan)) { |
2167 | dev_warn(chan2dev(chan), |
2168 | "cyclic channel not paused, should be done by channel user\n" ); |
2169 | atc_pause(chan); |
2170 | } |
2171 | |
2172 | /* now preserve additional data for cyclic operations */ |
2173 | /* next descriptor address in the cyclic list */ |
2174 | atchan->save_dscr = channel_readl(atchan, DSCR); |
2175 | |
2176 | vdbg_dump_regs(atchan); |
2177 | } |
2178 | |
2179 | static int at_dma_suspend_noirq(struct device *dev) |
2180 | { |
2181 | struct at_dma *atdma = dev_get_drvdata(dev); |
2182 | struct dma_chan *chan, *_chan; |
2183 | |
2184 | /* preserve data */ |
2185 | list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, |
2186 | device_node) { |
2187 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
2188 | |
2189 | if (atc_chan_is_cyclic(atchan)) |
2190 | atc_suspend_cyclic(atchan); |
2191 | atchan->save_cfg = channel_readl(atchan, CFG); |
2192 | } |
2193 | atdma->save_imr = dma_readl(atdma, EBCIMR); |
2194 | |
2195 | /* disable DMA controller */ |
2196 | at_dma_off(atdma); |
2197 | clk_disable_unprepare(clk: atdma->clk); |
2198 | return 0; |
2199 | } |
2200 | |
2201 | static void atc_resume_cyclic(struct at_dma_chan *atchan) |
2202 | { |
2203 | struct at_dma *atdma = to_at_dma(ddev: atchan->vc.chan.device); |
2204 | |
2205 | /* restore channel status for cyclic descriptors list: |
2206 | * next descriptor in the cyclic list at the time of suspend */ |
2207 | channel_writel(atchan, SADDR, 0); |
2208 | channel_writel(atchan, DADDR, 0); |
2209 | channel_writel(atchan, CTRLA, 0); |
2210 | channel_writel(atchan, CTRLB, 0); |
2211 | channel_writel(atchan, DSCR, atchan->save_dscr); |
2212 | dma_writel(atdma, CHER, atchan->mask); |
2213 | |
2214 | /* channel pause status should be removed by channel user |
2215 | * We cannot take the initiative to do it here */ |
2216 | |
2217 | vdbg_dump_regs(atchan); |
2218 | } |
2219 | |
2220 | static int at_dma_resume_noirq(struct device *dev) |
2221 | { |
2222 | struct at_dma *atdma = dev_get_drvdata(dev); |
2223 | struct dma_chan *chan, *_chan; |
2224 | |
2225 | /* bring back DMA controller */ |
2226 | clk_prepare_enable(clk: atdma->clk); |
2227 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
2228 | |
2229 | /* clear any pending interrupt */ |
2230 | while (dma_readl(atdma, EBCISR)) |
2231 | cpu_relax(); |
2232 | |
2233 | /* restore saved data */ |
2234 | dma_writel(atdma, EBCIER, atdma->save_imr); |
2235 | list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, |
2236 | device_node) { |
2237 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
2238 | |
2239 | channel_writel(atchan, CFG, atchan->save_cfg); |
2240 | if (atc_chan_is_cyclic(atchan)) |
2241 | atc_resume_cyclic(atchan); |
2242 | } |
2243 | return 0; |
2244 | } |
2245 | |
2246 | static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = { |
2247 | .prepare = at_dma_prepare, |
2248 | .suspend_noirq = at_dma_suspend_noirq, |
2249 | .resume_noirq = at_dma_resume_noirq, |
2250 | }; |
2251 | |
2252 | static struct platform_driver at_dma_driver = { |
2253 | .remove_new = at_dma_remove, |
2254 | .shutdown = at_dma_shutdown, |
2255 | .id_table = atdma_devtypes, |
2256 | .driver = { |
2257 | .name = "at_hdmac" , |
2258 | .pm = pm_ptr(&at_dma_dev_pm_ops), |
2259 | .of_match_table = of_match_ptr(atmel_dma_dt_ids), |
2260 | }, |
2261 | }; |
2262 | |
2263 | static int __init at_dma_init(void) |
2264 | { |
2265 | return platform_driver_probe(&at_dma_driver, at_dma_probe); |
2266 | } |
2267 | subsys_initcall(at_dma_init); |
2268 | |
2269 | static void __exit at_dma_exit(void) |
2270 | { |
2271 | platform_driver_unregister(&at_dma_driver); |
2272 | } |
2273 | module_exit(at_dma_exit); |
2274 | |
2275 | MODULE_DESCRIPTION("Atmel AHB DMA Controller driver" ); |
2276 | MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>" ); |
2277 | MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>" ); |
2278 | MODULE_LICENSE("GPL" ); |
2279 | MODULE_ALIAS("platform:at_hdmac" ); |
2280 | |