// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define DW_SPI_RX_BUSY		0
#define DW_SPI_RX_BURST_LEVEL	16
#define DW_SPI_TX_BUSY		1
#define DW_SPI_TX_BURST_LEVEL	16

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

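/*
 * Pick the Rx/Tx burst lengths from the DMA channel capabilities (falling
 * back to the 16-frame defaults), cap them at half the SPI FIFO depth and
 * program the DMA Rx/Tx data level thresholds accordingly.
 */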
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_TX_BURST_LEVEL;

	/*
	 * Having the Rx DMA channel serviced with higher priority than the Tx
	 * DMA channel might not be enough to provide a well balanced DMA-based
	 * SPI transfer interface. There might still be moments when the Tx DMA
	 * channel is occasionally handled faster than the Rx DMA channel.
	 * That in turn will eventually cause the SPI Rx FIFO to overflow if
	 * the SPI bus speed is high enough to fill the SPI Rx FIFO up before
	 * it's cleared by the Rx DMA channel. In order to fix the problem the
	 * Tx DMA activity is intentionally slowed down by limiting the SPI Tx
	 * FIFO depth to a value twice as big as the Tx burst length.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

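/*
 * Cache the DMA capabilities used at transfer time: verify the channels
 * support the required directions, save the minimal hardware accelerated
 * SG burst capability and the supported peripheral-side address widths.
 */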
static int dw_spi_dma_caps_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx, rx;
	int ret;

	ret = dma_get_slave_caps(dws->txchan, &tx);
	if (ret)
		return ret;

	ret = dma_get_slave_caps(dws->rxchan, &rx);
	if (ret)
		return ret;

	if (!(tx.directions & BIT(DMA_MEM_TO_DEV) &&
	      rx.directions & BIT(DMA_DEV_TO_MEM)))
		return -ENXIO;

	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;

	/*
	 * Assuming both channels belong to the same DMA controller, the
	 * peripheral-side address width capabilities are most likely the
	 * same.
	 */
	dws->dma_addr_widths = tx.dst_addr_widths & rx.src_addr_widths;

	return 0;
}

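/*
 * Request the Rx/Tx DMA channels of the Intel Medfield DMA controller (found
 * by its PCI ID) via the dw_dma_slave-based filter, then initialize the DMA
 * capabilities and burst lengths.
 */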
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;
	int ret = -EBUSY;

	/*
	 * Get the PCI device for the DMA controller, currently it could only
	 * be the DMA controller of Medfield.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->host->dma_rx = dws->rxchan;
	dws->host->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	ret = dw_spi_dma_caps_init(dws);
	if (ret)
		goto free_txchan;

	dw_spi_dma_maxburst_init(dws);

	pci_dev_put(dma_dev);

	return 0;

free_txchan:
	dma_release_channel(dws->txchan);
	dws->txchan = NULL;
free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	pci_dev_put(dma_dev);
	return ret;
}

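/*
 * Request the DMA channels named "rx" and "tx" described for the device,
 * then initialize the DMA capabilities and burst lengths.
 */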
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	int ret;

	dws->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dws->rxchan)) {
		ret = PTR_ERR(dws->rxchan);
		dws->rxchan = NULL;
		goto err_exit;
	}

	dws->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dws->txchan)) {
		ret = PTR_ERR(dws->txchan);
		dws->txchan = NULL;
		goto free_rxchan;
	}

	dws->host->dma_rx = dws->rxchan;
	dws->host->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	ret = dw_spi_dma_caps_init(dws);
	if (ret)
		goto free_txchan;

	dw_spi_dma_maxburst_init(dws);

	return 0;

free_txchan:
	dma_release_channel(dws->txchan);
	dws->txchan = NULL;
free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return ret;
}

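/* Terminate any active DMA transactions and release the channels */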
static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	dw_spi_check_status(dws, false);

	complete(&dws->dma_completion);

	return IRQ_HANDLED;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	switch (n_bytes) {
	case 1:
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	case 2:
		return DMA_SLAVE_BUSWIDTH_2_BYTES;
	case 4:
		return DMA_SLAVE_BUSWIDTH_4_BYTES;
	default:
		return DMA_SLAVE_BUSWIDTH_UNDEFINED;
	}
}

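/*
 * DMA is only used for transfers which don't fit into the SPI FIFO, and only
 * if the DMA channels support the transfer bus width.
 */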
static bool dw_spi_can_dma(struct spi_controller *host,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(host);
	enum dma_slave_buswidth dma_bus_width;

	if (xfer->len <= dws->fifo_len)
		return false;

	dma_bus_width = dw_spi_dma_convert_width(dws->n_bytes);

	return dws->dma_addr_widths & BIT(dma_bus_width);
}

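/*
 * Wait for the DMA completion with a timeout derived from the transfer
 * length and the effective SPI bus speed, plus a margin.
 */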
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->host->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}

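/*
 * The DMA completion callback may fire before the data has actually left the
 * Tx FIFO, so poll the FIFO-empty flag for a limited number of SCLK-based
 * delay periods.
 */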
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->host->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the Tx
 * channel callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

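/*
 * Configure the Tx DMA channel for mem-to-dev transfers to the SPI Tx FIFO
 * with the pre-calculated burst length and the transfer-specific bus width.
 */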
static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still doing the data fetching,
	 * but if it is, let's give it some reasonable time. The timeout
	 * calculation is based on the synchronous APB/SSI reference clock
	 * rate, on the number of data entries left in the Rx FIFO, times the
	 * number of clock periods normally needed for a single APB read/write
	 * transaction without the PREADY signal utilized (which is true for
	 * the DW APB SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->host->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the Rx
 * channel callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);

	return 0;
}

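/*
 * Prepare a DMA-based transfer: configure the Tx (and optionally Rx)
 * channels, enable the DMA handshaking interface and unmask the FIFO
 * overflow/underflow interrupts used to detect transfer errors.
 */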
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = DW_SPI_DMACR_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= DW_SPI_DMACR_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = DW_SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
	dw_spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

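/*
 * Map the whole transfer onto a single pair of DMA transactions by submitting
 * the full Tx and Rx SG lists at once and waiting for their completion.
 */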
static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if required */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/* Rx must be started before Tx due to SPI instinct */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * If at least one of the requested DMA channels doesn't support the hardware
 * accelerated SG list entries traversal, the DMA driver will most likely work
 * that around by performing the IRQ-based SG list entries resubmission. That
 * might and will cause a problem if the DMA Tx channel is recharged and
 * re-executed before the Rx DMA channel. Due to non-deterministic IRQ-handler
 * execution latency the DMA Tx channel will start pushing data to the SPI bus
 * before the Rx DMA channel is even reinitialized with the next inbound SG
 * list entry. By doing so the DMA Tx channel will implicitly start filling
 * the DW APB SSI Rx FIFO up, which while the Rx DMA channel is being
 * recharged and re-executed will eventually overflow.
 *
 * In order to solve the problem we have to feed the DMA engine with SG list
 * entries one-by-one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have a different number of entries of different lengths (though
 * the total lengths should match), let's virtually split the SG lists into a
 * set of DMA transfers, each with a length equal to the minimum of the
 * ordered SG entries' lengths. An ASCII sketch of the implemented algorithm
 * follows:
 *                  xfer->len
 *                |___________|
 *   tx_sg list:  |___|____|__|
 *   rx_sg list:  |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note that in order for this workaround to solve the denoted problem the DMA
 * engine driver should properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter to the maximum data block
 * size the DMA engine supports.
 */

static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0; base < xfer->len; base += len) {
		/* Fetch next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/* Rx must be started before Tx due to SPI instinct */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Here we only need to wait for the DMA transfer to be
		 * finished since the SPI controller is kept enabled during
		 * the procedure this loop implements and there is no risk of
		 * losing the data left in the Tx/Rx FIFOs.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

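/*
 * Perform the DMA-based transfer either by submitting the whole Tx/Rx SG
 * lists at once or, if the channels can't traverse them purely in hardware,
 * entry-by-entry, then wait for the controller FIFOs to drain.
 */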
static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Execute the normal DMA-based transfer (which submits the Rx and Tx
	 * SG lists directly to the DMA engine at once) if either the full
	 * hardware accelerated SG list traversal is supported by both
	 * channels, or a Tx-only SPI transfer is requested, or the DMA engine
	 * is capable of handling both SG lists on a hardware accelerated
	 * basis.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->host->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->host->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

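/* Terminate the DMA transactions which are still marked as busy */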
static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);