// SPDX-License-Identifier: GPL-2.0-only
/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, 2021 Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>

#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>

#include "spi-pxa2xx.h"

static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
					     bool error)
{
	struct spi_message *msg = drv_data->controller->cur_msg;

	/*
	 * It is possible that one CPU is handling the ROR interrupt while
	 * the other just gets the DMA completion. Calling pump_transfers()
	 * twice for the same transfer leads to problems, thus we prevent
	 * concurrent calls by using dma_running.
	 */
	if (atomic_dec_and_test(&drv_data->dma_running)) {
		/*
		 * If the other CPU is still handling the ROR interrupt we
		 * might not know about the error yet. So we re-check the
		 * ROR bit here before we clear the status register.
		 */
		if (!error)
			error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;

		/* Clear status & disable interrupts */
		clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
		write_SSSR_CS(drv_data, drv_data->clear_sr);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);

		if (error) {
			/* In case we got an error we disable the SSP now */
			pxa_ssp_disable(drv_data->ssp);
			msg->status = -EIO;
		}

		spi_finalize_current_transfer(drv_data->controller);
	}
}

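/* DMA engine completion callback, attached to the RX descriptor of a transfer */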
static void pxa2xx_spi_dma_callback(void *data)
{
	pxa2xx_spi_dma_transfer_complete(data, false);
}

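/*
 * Prepare a DMA slave descriptor for one direction of @xfer: the channel is
 * configured for the SSP data register address, bus width and burst size,
 * and a descriptor is prepared from the transfer's scatterlist. Returns
 * NULL if the channel configuration or descriptor preparation fails.
 */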
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
			   enum dma_transfer_direction dir,
			   struct spi_transfer *xfer)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->controller->cur_msg->spi);
	enum dma_slave_buswidth width;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	struct sg_table *sgt;
	int ret;

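	/* Derive the DMA bus width from the number of bytes per FIFO word */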
	switch (drv_data->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = chip->dma_burst_size;

		sgt = &xfer->tx_sg;
		chan = drv_data->controller->dma_tx;
	} else {
		cfg.src_addr = drv_data->ssp->phys_base + SSDR;
		cfg.src_addr_width = width;
		cfg.src_maxburst = chip->dma_burst_size;

		sgt = &xfer->rx_sg;
		chan = drv_data->controller->dma_rx;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
		return NULL;
	}

	return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

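/*
 * Called from the SSP interrupt handler while a DMA transfer is in flight.
 * A receive FIFO overrun (ROR) is the only event handled here: both DMA
 * channels are terminated and the transfer is completed with an error.
 */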
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 status;

	status = read_SSSR_bits(drv_data, drv_data->mask_sr);
	if (status & SSSR_ROR) {
		dev_err(drv_data->ssp->dev, "FIFO overrun\n");

		dmaengine_terminate_async(drv_data->controller->dma_rx);
		dmaengine_terminate_async(drv_data->controller->dma_tx);

		pxa2xx_spi_dma_transfer_complete(drv_data, true);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

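/*
 * Prepare both TX and RX descriptors for @xfer and submit them to the DMA
 * engine. The completion callback is attached to the RX descriptor since
 * the transfer is only done once the last word has been received.
 */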
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
			   struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
	int err;

	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
	if (!tx_desc) {
		dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
		err = -EBUSY;
		goto err_tx;
	}

	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
	if (!rx_desc) {
		dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
		err = -EBUSY;
		goto err_rx;
	}

	/* We are ready when RX completes */
	rx_desc->callback = pxa2xx_spi_dma_callback;
	rx_desc->callback_param = drv_data;

	dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);
	return 0;

err_rx:
	dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
	return err;
}

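/* Kick both submitted descriptors and mark the DMA transfer as running */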
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	dma_async_issue_pending(drv_data->controller->dma_rx);
	dma_async_issue_pending(drv_data->controller->dma_tx);

	atomic_set(&drv_data->dma_running, 1);
}

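/* Abort an in-flight DMA transfer and wait for both channels to go idle */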
void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
	atomic_set(&drv_data->dma_running, 0);
	dmaengine_terminate_sync(drv_data->controller->dma_rx);
	dmaengine_terminate_sync(drv_data->controller->dma_tx);
}

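/*
 * Request the TX and RX DMA channels, preferring firmware-described (DT or
 * ACPI) channels and falling back to the legacy filter function from the
 * platform data. Returns -ENODEV if either channel is unavailable.
 */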
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
	struct spi_controller *controller = drv_data->controller;
	struct device *dev = drv_data->ssp->dev;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	controller->dma_tx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->tx_param, dev, "tx");
	if (!controller->dma_tx)
		return -ENODEV;

	controller->dma_rx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->rx_param, dev, "rx");
	if (!controller->dma_rx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
		return -ENODEV;
	}

	return 0;
}

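/* Terminate any pending DMA activity and release both channels */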
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct spi_controller *controller = drv_data->controller;

	if (controller->dma_rx) {
		dmaengine_terminate_sync(controller->dma_rx);
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}
	if (controller->dma_tx) {
		dmaengine_terminate_sync(controller->dma_tx);
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}

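/*
 * Work out the DMA burst size and the SSCR1 RX/TX FIFO threshold bits to
 * use for @spi. @bits_per_word is currently not taken into account.
 */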
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info = spi->controller_data;
	struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
	u32 dma_burst_size = drv_data->controller_info->dma_burst_size;

	/*
	 * If the DMA burst size is given in chip_info we use that,
	 * otherwise we use the default. Also we use the default FIFO
	 * thresholds for now.
	 */
	*burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
	*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
		   | SSCR1_TxTresh(TX_THRESH_DFLT);

	return 0;
}