// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_ISR(n)		(((n) & 4) ? STM32_DMA_HISR : STM32_DMA_LISR)
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_IFCR(n)		(((n) & 4) ? STM32_DMA_HIFCR : STM32_DMA_LIFCR)
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)
/*
 * If (chan->id % 4) is 2 or 3, left shift the mask by 16 bits;
 * if (chan->id % 4) is 1 or 3, additionally left shift the mask by 6 bits.
 */
#define STM32_DMA_FLAGS_SHIFT(n)	({ typeof(n) (_n) = (n); \
					   (((_n) & 2) << 3) | (((_n) & 1) * 6); })
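/*
 * Worked example (derived from the macro above): within one status/clear
 * register, (chan->id % 4) of 0, 1, 2, 3 yields a flag shift of 0, 6, 16
 * and 22 bits respectively, matching the ISR/IFCR flag layout.
 */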

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ_MASK		GENMASK(27, 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_TRBUFF		BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					 | STM32_DMA_SCR_MINC \
					 | STM32_DMA_SCR_PINCOS \
					 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					 | STM32_DMA_SCR_TEIE \
					 | STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					 | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL		0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL		0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL		0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL			0x03
#define STM32_DMA_FIFO_THRESHOLD_NONE			0x04

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * A valid transfer length ranges from 0 to 0xFFFE, which can leave a
 * scatter-gather segment unaligned at the boundary. It is therefore safer
 * to round this value down to the FIFO size (16 bytes).
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
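/* e.g. ALIGN_DOWN(0xffff, 16) = 0xfff0, i.e. at most 65520 data items */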
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_DIRECT_MODE_MASK	BIT(2)
#define STM32_DMA_ALT_ACK_MODE_MASK	BIT(4)
#define STM32_DMA_MDMA_STREAM_ID_MASK	GENMASK(19, 16)

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs);
};

/**
 * struct stm32_dma_mdma_config - STM32 DMA MDMA configuration
 * @stream_id: DMA request to trigger STM32 MDMA transfer
 * @ifcr: DMA interrupt flag clear register address,
 *        used by STM32 MDMA to clear DMA Transfer Complete flag
 * @tcf: DMA Transfer Complete flag
 */
struct stm32_dma_mdma_config {
	u32 stream_id;
	u32 ifcr;
	u32 tcf;
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
	enum dma_status status;
	bool trig_mdma;
	struct stm32_dma_mdma_config mdma_config;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}

static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       dma_addr_t buf_addr,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	if (buf_addr & (max_width - 1))
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	return max_width;
}
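
/*
 * Example (illustrative values): with a FULL FIFO threshold, a 6-byte
 * buffer starts at a 4-byte width, fails the 6 % 4 check and drops to
 * 2 bytes; an odd buffer address then forces the width down to 1 byte.
 */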

static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If the number of beats fits into a whole number
			 * of bursts, this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
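
/*
 * Worked example for the check above: with a 4-byte width and a FULL
 * threshold (3), the FIFO holds (16 / 4) * (3 + 1) / 4 = 4 beats, so a
 * burst of 4 divides it evenly (remaining == 0) and is allowed.
 */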

static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/* If FIFO direct mode, burst is not possible */
	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	/*
	 * The buffer or period length has to be aligned on the FIFO depth.
	 * Otherwise bytes may be stuck in the FIFO at the end of the buffer
	 * or period.
	 */
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}
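
/*
 * e.g. a FULL threshold (3) triggers every (3 + 1) * 4 = 16 bytes, so the
 * buffer or period length must be a multiple of 16 bytes for bursts to be
 * usable.
 */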

static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}
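
/*
 * Sample trace of the search above: buf_len = 64, max_burst = 16,
 * width = 4, FULL threshold: the FIFO holds 4 beats, so bursts of 16 and 8
 * are rejected by stm32_dma_fifo_threshold_is_allowed() and the best burst
 * is 4.
 */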

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	/* Check if user is requesting DMA to trigger STM32 MDMA */
	if (config->peripheral_size) {
		config->peripheral_config = &chan->mdma_config;
		config->peripheral_size = sizeof(chan->mdma_config);
		chan->trig_mdma = true;
	}

	chan->config_init = true;

	return 0;
}
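
/*
 * Note: a client chaining this DMA to the STM32 MDMA is expected to pass a
 * non-zero peripheral_size via dmaengine_slave_config(); the config is then
 * rewritten above so that the MDMA driver can pick up &chan->mdma_config
 * through peripheral_config (sketch of the contract implemented above).
 */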

static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from the DMA_xISR register corresponding to the
	 * selected DMA channel, at the correct bit offset inside that
	 * register.
	 */

	dma_isr = stm32_dma_read(dmadev, STM32_DMA_ISR(chan->id));
	flags = dma_isr >> STM32_DMA_FLAGS_SHIFT(chan->id);

	return flags & STM32_DMA_MASKI;
}

static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the
	 * selected DMA channel, at the correct bit offset inside that
	 * register.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << STM32_DMA_FLAGS_SHIFT(chan->id);

	stm32_dma_write(dmadev, STM32_DMA_IFCR(chan->id), dma_ifcr);
}

static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, id, reg;

	id = chan->id;
	reg = STM32_DMA_SCR(id);
	dma_scr = stm32_dma_read(dmadev, reg);

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, reg, dma_scr);

		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
					dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
					10, 1000000);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
	chan->status = DMA_COMPLETE;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->desc) {
		dma_cookie_complete(&chan->desc->vdesc.tx);
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
{
	chan->next_sg++;
	if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
		chan->next_sg = 0;
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	/* When DMA triggers STM32 MDMA, DMA Transfer Complete is managed by STM32 MDMA */
	if (chan->trig_mdma && chan->dma_sconfig.direction != DMA_MEM_TO_DEV)
		reg->dma_scr &= ~STM32_DMA_SCR_TCIE;

	reg->dma_scr &= ~STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	stm32_dma_sg_inc(chan);

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	chan->busy = true;
	chan->status = DMA_IN_PROGRESS;
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
		stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
		dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
			stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
	} else {
		dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
		stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
		dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
			stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
	}
}

static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr;

	/*
	 * Read and store the current remaining data items and
	 * peripheral/memory addresses, to be updated on resume.
	 */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	/*
	 * The transfer may be paused between a previous resume and the
	 * reconfiguration done on transfer complete. If the transfer is
	 * cyclic and CIRC/DBM were deactivated for that resume, set them
	 * again here in the SCR backup, so that the reconfiguration on
	 * transfer complete is correct.
	 */
	if (chan->desc && chan->desc->cyclic) {
		if (chan->desc->num_sgs == 1)
			dma_scr |= STM32_DMA_SCR_CIRC;
		else
			dma_scr |= STM32_DMA_SCR_DBM;
	}
	chan->chan_reg.dma_scr = dma_scr;

	/*
	 * CIRC/DBM must be temporarily deactivated until the next Transfer
	 * Complete interrupt, otherwise on resume the NDTR autoreload value
	 * will be wrong (lower than the initial period length).
	 */
	if (chan->desc && chan->desc->cyclic) {
		dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
		stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	}

	chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	chan->status = DMA_PAUSED;

	dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
}

static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, status, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (!chan->next_sg)
		sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
	else
		sg_req = &chan->desc->sg_req[chan->next_sg - 1];

	/* Reconfigure NDTR with the initial value */
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);

	/* Restore SPAR */
	stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);

	/* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);

	/* Reactivate CIRC/DBM if needed */
	if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
		dma_scr |= STM32_DMA_SCR_DBM;
		/* Restore CT */
		if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
			dma_scr &= ~STM32_DMA_SCR_CT;
		else
			dma_scr |= STM32_DMA_SCR_CT;
	} else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
		dma_scr |= STM32_DMA_SCR_CIRC;
	}
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);

	stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);

	dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
{
	if (!chan->desc)
		return;

	if (chan->desc->cyclic) {
		vchan_cyclic_callback(&chan->desc->vdesc);
		if (chan->trig_mdma)
			return;
		stm32_dma_sg_inc(chan);
		/* cyclic while CIRC/DBM are disabled => post-resume reconfiguration needed */
		if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
			stm32_dma_post_resume_reconfigure(chan);
		else if (scr & STM32_DMA_SCR_DBM)
			stm32_dma_configure_next_sg(chan);
	} else {
		chan->busy = false;
		chan->status = DMA_COMPLETE;
		if (chan->next_sg == chan->desc->num_sgs) {
			vchan_cookie_complete(&chan->desc->vdesc);
			chan->desc = NULL;
		}
		stm32_dma_start_transfer(chan);
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr, sfcr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (sfcr & STM32_DMA_SFCR_FEIE) {
			if (!(scr & STM32_DMA_SCR_EN) &&
			    !(status & STM32_DMA_TCI))
				dev_err(chan2dev(chan), "FIFO Error\n");
			else
				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
		}
	}
	if (status & STM32_DMA_DMEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
		status &= ~STM32_DMA_DMEI;
		if (sfcr & STM32_DMA_SCR_DMEIE)
			dev_dbg(chan2dev(chan), "Direct mode overrun\n");
	}

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE) {
			if (chan->status != DMA_PAUSED)
				stm32_dma_handle_chan_done(chan, scr);
		}
		status &= ~STM32_DMA_TCI;
	}

	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}

	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_dma_pause(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	int ret;

	if (chan->status != DMA_IN_PROGRESS)
		return -EPERM;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	ret = stm32_dma_disable_chan(chan);
	if (!ret)
		stm32_dma_handle_chan_paused(chan);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return ret;
}

static int stm32_dma_resume(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
	u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
	struct stm32_dma_sg_req *sg_req;
	unsigned long flags;

	if (chan->status != DMA_PAUSED)
		return -EPERM;

	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
	if (WARN_ON(scr & STM32_DMA_SCR_EN))
		return -EPERM;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	/* sg_req[prev_sg] contains the original ndtr, sm0ar and sm1ar from before the pause */
	if (!chan->next_sg)
		sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
	else
		sg_req = &chan->desc->sg_req[chan->next_sg - 1];

	ndtr = sg_req->chan_reg.dma_sndtr;
	offset = (ndtr - chan_reg.dma_sndtr);
	offset <<= FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, chan_reg.dma_scr);
	spar = sg_req->chan_reg.dma_spar;
	sm0ar = sg_req->chan_reg.dma_sm0ar;
	sm1ar = sg_req->chan_reg.dma_sm1ar;
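
	/*
	 * Illustrative numbers: if the period was programmed with
	 * ndtr = 960 data items and 720 remain at the pause, 240 items were
	 * transferred; with a half-word PSIZE (1), the byte offset is
	 * 240 << 1 = 480 bytes.
	 */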

	/*
	 * The peripheral and/or memory addresses have to be updated in order
	 * to adjust the address pointers. The increment mode must be checked.
	 */
	if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
		stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
	else
		stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);

	if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
		offset = 0;

	/*
	 * In case of DBM, the current target could be SM1AR.
	 * CIRC/DBM need to be temporarily deactivated to finish the current
	 * transfer, so SM0AR becomes the current target and must be updated
	 * with SM1AR + offset if CT=1.
	 */
	if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
		stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
	else
		stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);

	/* NDTR must be restored, otherwise the internal HW counter won't be correctly reset */
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);

	/*
	 * CIRC/DBM need to be temporarily deactivated until the next Transfer
	 * Complete interrupt, otherwise the NDTR autoreload value will be
	 * wrong (lower than the initial period length).
	 */
	if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
		chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);

	if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* The stream may then be re-enabled to restart the transfer from the point it was stopped */
	chan->status = DMA_IN_PROGRESS;
	chan_reg.dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);

	return 0;
}

static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len, dma_addr_t buf_addr)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, fifoth;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	fifoth = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
							 fifoth);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/*
		 * Set memory burst size - bursts are not possible if the
		 * address is not aligned on an address boundary equal to
		 * the size of the transfer.
		 */
		if (buf_addr & (buf_len - 1))
			src_maxburst = 1;
		else
			src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_DEV) |
			FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, dst_bus_width) |
			FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, src_bus_width) |
			FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dst_burst_size) |
			FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
							 fifoth);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/*
		 * Set memory burst size - bursts are not possible if the
		 * address is not aligned on an address boundary equal to
		 * the size of the transfer.
		 */
		if (buf_addr & (buf_len - 1))
			dst_maxburst = 1;
		else
			dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_DEV_TO_MEM) |
			FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, src_bus_width) |
			FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, dst_bus_width) |
			FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, src_burst_size) |
			FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}
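
/*
 * Example outcome (illustrative): DMA_MEM_TO_DEV with dst_addr_width = 4,
 * dst_maxburst = 4, a FULL FIFO threshold and a 64-byte aligned buffer
 * yields word-sized PSIZE/MSIZE, PBURST/MBURST of INCR4 and FIFO mode with
 * a FULL threshold.
 */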

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;
	desc->num_sgs = sg_len;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	/* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
	if (chan->trig_mdma && sg_len > 1) {
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg),
					       sg_dma_address(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		if (chan->trig_mdma)
			desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * More requests can be queued as long as the DMA has not been
	 * started. The driver will loop over all requests. Once the DMA has
	 * started, new requests can only be queued after terminating the
	 * DMA.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
				       buf_addr);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len) {
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	} else {
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
	}

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;
	desc->num_sgs = num_periods;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		if (chan->trig_mdma)
			desc->sg_req[i].chan_reg.dma_sm1ar += period_len;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		if (!chan->trig_mdma)
			buf_addr += period_len;
	}
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;
	desc->num_sgs = num_sgs;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_MEM) |
			FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dma_burst) |
			FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}
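
/*
 * e.g. with PSIZE = word (encoded as 2), NDTR = 240 remaining data items
 * translates to 240 << 2 = 960 remaining bytes.
 */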

/**
 * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
 * @chan: dma channel
 *
 * This function, called while IRQs are disabled, checks that the hardware has
 * not switched to the next transfer in double buffer mode. The test is done by
 * comparing the next_sg memory address with the hardware related register
 * (based on the CT bit value).
 *
 * Returns true if the expected current transfer is still running, or if
 * double buffer mode is not activated.
 */
static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_smar, id, period_len;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	/* In cyclic CIRC but not DBM, CT is not used */
	if (!(dma_scr & STM32_DMA_SCR_DBM))
		return true;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	period_len = sg_req->len;

	/* DBM - take care of a previous pause/resume not yet post reconfigured */
	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		/*
		 * If the transfer has been paused/resumed,
		 * SM0AR is in the range [SM0AR : SM0AR + period_len]
		 */
		return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
			dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
	/*
	 * If the transfer has been paused/resumed,
	 * SM1AR is in the range [SM1AR : SM1AR + period_len]
	 */
	return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
		dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
}

static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue;
	u32 n_sg = next_sg;
	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;

	/*
	 * Computing the residue means gathering two pieces of descriptor
	 * information:
	 * - the sg_req currently transferred,
	 * - the hardware remaining position in this sg (NDTR bit field).
	 *
	 * A race condition may occur if the DMA is running in cyclic or
	 * double buffer mode, since the DMA registers are automatically
	 * reloaded at the end of a period transfer. The hardware may have
	 * switched to the next transfer (CT bit updated) just before the
	 * position (SxNDTR reg) is read.
	 * In this case the SxNDTR reg may (or may not) correspond to the new
	 * transfer position, rather than the expected one.
	 * The strategy implemented in the stm32 driver is to:
	 * - read the SxNDTR register
	 * - crosscheck that the hardware is still in the current transfer.
	 * In case of a switch, we can assume that the DMA is at the beginning
	 * of the next transfer, so we approximate the residue accordingly by
	 * pointing at the beginning of the next transfer.
	 *
	 * This race condition does not apply to non-cyclic mode, as double
	 * buffer mode is not used there and the registers are updated by
	 * software.
	 */

	residue = stm32_dma_get_remaining_bytes(chan);

	if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
		n_sg++;
		if (n_sg == chan->desc->num_sgs)
			n_sg = 0;
		if (!chan->trig_mdma)
			residue = sg_req->len;
	}

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes
	 * from NDTR;
	 * else, for all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining
	 * periods/sg to be transferred.
	 */
	if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
		for (i = n_sg; i < desc->num_sgs; i++)
			residue += desc->sg_req[i].len;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}
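
/*
 * Rounding example for the burst alignment above: with mem_burst = 16 and
 * mem_width = 4 (burst_size = 64 bytes), a raw residue of 100 bytes is
 * rounded up to 128 bytes, the next burst boundary.
 */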
1417 | |
1418 | static enum dma_status stm32_dma_tx_status(struct dma_chan *c, |
1419 | dma_cookie_t cookie, |
1420 | struct dma_tx_state *state) |
1421 | { |
1422 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); |
1423 | struct virt_dma_desc *vdesc; |
1424 | enum dma_status status; |
1425 | unsigned long flags; |
1426 | u32 residue = 0; |
1427 | |
1428 | status = dma_cookie_status(chan: c, cookie, state); |
1429 | if (status == DMA_COMPLETE) |
1430 | return status; |
1431 | |
1432 | status = chan->status; |
1433 | |
1434 | if (!state) |
1435 | return status; |
1436 | |
1437 | spin_lock_irqsave(&chan->vchan.lock, flags); |
1438 | vdesc = vchan_find_desc(&chan->vchan, cookie); |
1439 | if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) |
1440 | residue = stm32_dma_desc_residue(chan, desc: chan->desc, |
1441 | next_sg: chan->next_sg); |
1442 | else if (vdesc) |
1443 | residue = stm32_dma_desc_residue(chan, |
1444 | desc: to_stm32_dma_desc(vdesc), next_sg: 0); |
1445 | dma_set_residue(state, residue); |
1446 | |
1447 | spin_unlock_irqrestore(lock: &chan->vchan.lock, flags); |
1448 | |
1449 | return status; |
1450 | } |
1451 | |
1452 | static int stm32_dma_alloc_chan_resources(struct dma_chan *c) |
1453 | { |
1454 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); |
1455 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); |
1456 | int ret; |
1457 | |
1458 | chan->config_init = false; |
1459 | |
1460 | ret = pm_runtime_resume_and_get(dev: dmadev->ddev.dev); |
1461 | if (ret < 0) |
1462 | return ret; |
1463 | |
1464 | ret = stm32_dma_disable_chan(chan); |
1465 | if (ret < 0) |
1466 | pm_runtime_put(dev: dmadev->ddev.dev); |
1467 | |
1468 | return ret; |
1469 | } |
1470 | |
1471 | static void stm32_dma_free_chan_resources(struct dma_chan *c) |
1472 | { |
1473 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); |
1474 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); |
1475 | unsigned long flags; |
1476 | |
1477 | dev_dbg(chan2dev(chan), "Freeing channel %d\n" , chan->id); |
1478 | |
1479 | if (chan->busy) { |
1480 | spin_lock_irqsave(&chan->vchan.lock, flags); |
1481 | stm32_dma_stop(chan); |
1482 | chan->desc = NULL; |
1483 | spin_unlock_irqrestore(lock: &chan->vchan.lock, flags); |
1484 | } |
1485 | |
1486 | pm_runtime_put(dev: dmadev->ddev.dev); |
1487 | |
1488 | vchan_free_chan_resources(vc: to_virt_chan(chan: c)); |
1489 | stm32_dma_clear_reg(regs: &chan->chan_reg); |
1490 | chan->threshold = 0; |
1491 | } |
1492 | |
1493 | static void stm32_dma_desc_free(struct virt_dma_desc *vdesc) |
1494 | { |
1495 | kfree(container_of(vdesc, struct stm32_dma_desc, vdesc)); |
1496 | } |
1497 | |
1498 | static void stm32_dma_set_config(struct stm32_dma_chan *chan, |
1499 | struct stm32_dma_cfg *cfg) |
1500 | { |
1501 | stm32_dma_clear_reg(regs: &chan->chan_reg); |
1502 | |
1503 | chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK; |
1504 | chan->chan_reg.dma_scr |= FIELD_PREP(STM32_DMA_SCR_REQ_MASK, cfg->request_line); |
1505 | |
1506 | /* Enable Interrupts */ |
1507 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE; |
1508 | |
1509 | chan->threshold = FIELD_GET(STM32_DMA_THRESHOLD_FTR_MASK, cfg->features); |
1510 | if (FIELD_GET(STM32_DMA_DIRECT_MODE_MASK, cfg->features)) |
1511 | chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE; |
1512 | if (FIELD_GET(STM32_DMA_ALT_ACK_MODE_MASK, cfg->features)) |
1513 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF; |
1514 | chan->mdma_config.stream_id = FIELD_GET(STM32_DMA_MDMA_STREAM_ID_MASK, cfg->features); |
1515 | } |
1516 | |
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

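/*
 * Probe: map the controller registers, enable the clock, pulse the
 * optional reset, describe the engine capabilities, then register the
 * eight streams with the dmaengine core and hook up their interrupts.
 */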
static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	struct resource *res;
	struct reset_control *rst;
	int i, ret;

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	dmadev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

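	/*
	 * The reset line is optional: pulse it when present, but keep
	 * deferring probe if the reset controller itself is not ready yet.
	 */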
	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto clk_free;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_pause = stm32_dma_pause;
	dd->device_resume = stm32_dma_resume;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->max_sg_burst = STM32_DMA_ALIGNED_MAX_DATA_ITEMS;
	dd->descriptor_reuse = true;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);

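		/*
		 * Precompute the physical address of the interrupt flag
		 * clear register and the transfer complete flag for this
		 * stream, so that an MDMA channel chained to this stream
		 * can acknowledge the transfer on its own.
		 */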
		chan->mdma_config.ifcr = res->start;
		chan->mdma_config.ifcr += STM32_DMA_IFCR(chan->id);

		chan->mdma_config.tcf = STM32_DMA_TCI;
		chan->mdma_config.tcf <<= STM32_DMA_FLAGS_SHIFT(chan->id);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		goto clk_free;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			goto err_unregister;
		chan->irq = ret;

		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

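	/*
	 * The clock is already running: record the device as active, then
	 * enable runtime PM and drop the initial reference so the clock
	 * can be gated as soon as the controller is idle.
	 */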
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
clk_free:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}

#ifdef CONFIG_PM
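/* Runtime PM simply gates the controller clock while all streams are idle. */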
static int stm32_dma_runtime_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_dma_runtime_resume(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
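/*
 * Refuse system suspend while any stream is still enabled: gating the
 * clock would break the transfer in flight.
 */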
static int stm32_dma_pm_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int id, ret, scr;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
		if (scr & STM32_DMA_SCR_EN) {
			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
			/* Drop the reference taken above before bailing out */
			pm_runtime_put_sync(dev);
			return -EBUSY;
		}
	}

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dma_pm_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif

static const struct dev_pm_ops stm32_dma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
			   stm32_dma_runtime_resume, NULL)
};

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
		.pm = &stm32_dma_pm_ops,
	},
	.probe = stm32_dma_probe,
};

static int __init stm32_dma_init(void)
{
	return platform_driver_register(&stm32_dma_driver);
}
subsys_initcall(stm32_dma_init);