1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Copyright (C) 2009 Texas Instruments. |
4 | * Copyright (C) 2010 EF Johnson Technologies |
5 | */ |
6 | |
7 | #include <linux/interrupt.h> |
8 | #include <linux/io.h> |
9 | #include <linux/gpio/consumer.h> |
10 | #include <linux/module.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/platform_device.h> |
13 | #include <linux/err.h> |
14 | #include <linux/clk.h> |
15 | #include <linux/dmaengine.h> |
16 | #include <linux/dma-mapping.h> |
17 | #include <linux/of.h> |
18 | #include <linux/spi/spi.h> |
19 | #include <linux/spi/spi_bitbang.h> |
20 | #include <linux/slab.h> |
21 | |
22 | #include <linux/platform_data/spi-davinci.h> |
23 | |
24 | #define CS_DEFAULT 0xFF |
25 | |
26 | #define SPIFMT_PHASE_MASK BIT(16) |
27 | #define SPIFMT_POLARITY_MASK BIT(17) |
28 | #define SPIFMT_DISTIMER_MASK BIT(18) |
29 | #define SPIFMT_SHIFTDIR_MASK BIT(20) |
30 | #define SPIFMT_WAITENA_MASK BIT(21) |
31 | #define SPIFMT_PARITYENA_MASK BIT(22) |
32 | #define SPIFMT_ODD_PARITY_MASK BIT(23) |
33 | #define SPIFMT_WDELAY_MASK 0x3f000000u |
34 | #define SPIFMT_WDELAY_SHIFT 24 |
35 | #define SPIFMT_PRESCALE_SHIFT 8 |
36 | |
37 | /* SPIPC0 */ |
38 | #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ |
39 | #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ |
40 | #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ |
41 | #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ |
42 | |
43 | #define SPIINT_MASKALL 0x0101035F |
44 | #define SPIINT_MASKINT 0x0000015F |
45 | #define SPI_INTLVL_1 0x000001FF |
46 | #define SPI_INTLVL_0 0x00000000 |
47 | |
48 | /* SPIDAT1 (upper 16 bit defines) */ |
49 | #define SPIDAT1_CSHOLD_MASK BIT(12) |
50 | #define SPIDAT1_WDEL BIT(10) |
51 | |
52 | /* SPIGCR1 */ |
53 | #define SPIGCR1_CLKMOD_MASK BIT(1) |
54 | #define SPIGCR1_MASTER_MASK BIT(0) |
55 | #define SPIGCR1_POWERDOWN_MASK BIT(8) |
56 | #define SPIGCR1_LOOPBACK_MASK BIT(16) |
57 | #define SPIGCR1_SPIENA_MASK BIT(24) |
58 | |
59 | /* SPIBUF */ |
60 | #define SPIBUF_TXFULL_MASK BIT(29) |
61 | #define SPIBUF_RXEMPTY_MASK BIT(31) |
62 | |
63 | /* SPIDELAY */ |
64 | #define SPIDELAY_C2TDELAY_SHIFT 24 |
65 | #define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT) |
66 | #define SPIDELAY_T2CDELAY_SHIFT 16 |
67 | #define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT) |
68 | #define SPIDELAY_T2EDELAY_SHIFT 8 |
69 | #define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT) |
70 | #define SPIDELAY_C2EDELAY_SHIFT 0 |
71 | #define SPIDELAY_C2EDELAY_MASK 0xFF |
72 | |
73 | /* Error Masks */ |
74 | #define SPIFLG_DLEN_ERR_MASK BIT(0) |
75 | #define SPIFLG_TIMEOUT_MASK BIT(1) |
76 | #define SPIFLG_PARERR_MASK BIT(2) |
77 | #define SPIFLG_DESYNC_MASK BIT(3) |
78 | #define SPIFLG_BITERR_MASK BIT(4) |
79 | #define SPIFLG_OVRRUN_MASK BIT(6) |
80 | #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) |
81 | #define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \ |
82 | | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ |
83 | | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ |
84 | | SPIFLG_OVRRUN_MASK) |
85 | |
86 | #define SPIINT_DMA_REQ_EN BIT(16) |
87 | |
88 | /* SPI Controller registers */ |
89 | #define SPIGCR0 0x00 |
90 | #define SPIGCR1 0x04 |
91 | #define SPIINT 0x08 |
92 | #define SPILVL 0x0c |
93 | #define SPIFLG 0x10 |
94 | #define SPIPC0 0x14 |
95 | #define SPIDAT1 0x3c |
96 | #define SPIBUF 0x40 |
97 | #define SPIDELAY 0x48 |
98 | #define SPIDEF 0x4c |
99 | #define SPIFMT0 0x50 |
100 | |
101 | #define DMA_MIN_BYTES 16 |
102 | |
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang bitbang;	/* spi-bitbang framework state */
	struct clk *clk;		/* module clock; rate feeds prescaler math */

	u8 version;			/* SPI_VERSION_1 or SPI_VERSION_2 IP revision */
	resource_size_t pbase;		/* physical register base (for DMA FIFO addresses) */
	void __iomem *base;		/* ioremapped register base */
	u32 irq;			/* SPI interrupt number */
	struct completion done;		/* completed when a transfer finishes or errors */

	const void *tx;			/* current read position in the TX buffer (may be NULL) */
	void *rx;			/* current write position in the RX buffer (may be NULL) */
	int rcount;			/* words still to receive in current transfer */
	int wcount;			/* words still to transmit in current transfer */

	struct dma_chan *dma_rx;	/* RX DMA channel; NULL when DMA is unavailable */
	struct dma_chan *dma_tx;	/* TX DMA channel; NULL when DMA is unavailable */

	struct davinci_spi_platform_data pdata;	/* copy of board/DT configuration */

	void (*get_rx)(u32 rx_data, struct davinci_spi *);	/* store one received word (8/16 bit) */
	u32 (*get_tx)(struct davinci_spi *);			/* fetch next word to transmit */

	u8 *bytes_per_word;		/* per-chipselect word size in bytes (1 or 2) */

	u8 prescaler_limit;		/* smallest prescaler value this IP accepts */
};
131 | |
/* Zero-initialized fallback used when a device supplies no controller_data. */
static struct davinci_spi_config davinci_spi_default_cfg;
133 | |
134 | static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi) |
135 | { |
136 | if (dspi->rx) { |
137 | u8 *rx = dspi->rx; |
138 | *rx++ = (u8)data; |
139 | dspi->rx = rx; |
140 | } |
141 | } |
142 | |
143 | static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi) |
144 | { |
145 | if (dspi->rx) { |
146 | u16 *rx = dspi->rx; |
147 | *rx++ = (u16)data; |
148 | dspi->rx = rx; |
149 | } |
150 | } |
151 | |
152 | static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) |
153 | { |
154 | u32 data = 0; |
155 | |
156 | if (dspi->tx) { |
157 | const u8 *tx = dspi->tx; |
158 | |
159 | data = *tx++; |
160 | dspi->tx = tx; |
161 | } |
162 | return data; |
163 | } |
164 | |
165 | static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi) |
166 | { |
167 | u32 data = 0; |
168 | |
169 | if (dspi->tx) { |
170 | const u16 *tx = dspi->tx; |
171 | |
172 | data = *tx++; |
173 | dspi->tx = tx; |
174 | } |
175 | return data; |
176 | } |
177 | |
178 | static inline void set_io_bits(void __iomem *addr, u32 bits) |
179 | { |
180 | u32 v = ioread32(addr); |
181 | |
182 | v |= bits; |
183 | iowrite32(v, addr); |
184 | } |
185 | |
186 | static inline void clear_io_bits(void __iomem *addr, u32 bits) |
187 | { |
188 | u32 v = ioread32(addr); |
189 | |
190 | v &= ~bits; |
191 | iowrite32(v, addr); |
192 | } |
193 | |
194 | /* |
195 | * Interface to control the chip select signal |
196 | */ |
197 | static void davinci_spi_chipselect(struct spi_device *spi, int value) |
198 | { |
199 | struct davinci_spi *dspi; |
200 | struct davinci_spi_config *spicfg = spi->controller_data; |
201 | u8 chip_sel = spi_get_chipselect(spi, idx: 0); |
202 | u16 spidat1 = CS_DEFAULT; |
203 | |
204 | dspi = spi_controller_get_devdata(ctlr: spi->controller); |
205 | |
206 | /* program delay transfers if tx_delay is non zero */ |
207 | if (spicfg && spicfg->wdelay) |
208 | spidat1 |= SPIDAT1_WDEL; |
209 | |
210 | /* |
211 | * Board specific chip select logic decides the polarity and cs |
212 | * line for the controller |
213 | */ |
214 | if (spi_get_csgpiod(spi, idx: 0)) { |
215 | if (value == BITBANG_CS_ACTIVE) |
216 | gpiod_set_value(desc: spi_get_csgpiod(spi, idx: 0), value: 1); |
217 | else |
218 | gpiod_set_value(desc: spi_get_csgpiod(spi, idx: 0), value: 0); |
219 | } else { |
220 | if (value == BITBANG_CS_ACTIVE) { |
221 | if (!(spi->mode & SPI_CS_WORD)) |
222 | spidat1 |= SPIDAT1_CSHOLD_MASK; |
223 | spidat1 &= ~(0x1 << chip_sel); |
224 | } |
225 | } |
226 | |
227 | iowrite16(spidat1, dspi->base + SPIDAT1 + 2); |
228 | } |
229 | |
230 | /** |
231 | * davinci_spi_get_prescale - Calculates the correct prescale value |
232 | * @dspi: the controller data |
233 | * @max_speed_hz: the maximum rate the SPI clock can run at |
234 | * |
235 | * This function calculates the prescale value that generates a clock rate |
236 | * less than or equal to the specified maximum. |
237 | * |
238 | * Returns: calculated prescale value for easy programming into SPI registers |
239 | * or negative error number if valid prescalar cannot be updated. |
240 | */ |
241 | static inline int davinci_spi_get_prescale(struct davinci_spi *dspi, |
242 | u32 max_speed_hz) |
243 | { |
244 | int ret; |
245 | |
246 | /* Subtract 1 to match what will be programmed into SPI register. */ |
247 | ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1; |
248 | |
249 | if (ret < dspi->prescaler_limit || ret > 255) |
250 | return -EINVAL; |
251 | |
252 | return ret; |
253 | } |
254 | |
255 | /** |
256 | * davinci_spi_setup_transfer - This functions will determine transfer method |
257 | * @spi: spi device on which data transfer to be done |
258 | * @t: spi transfer in which transfer info is filled |
259 | * |
260 | * This function determines data transfer method (8/16/32 bit transfer). |
261 | * It will also set the SPI Clock Control register according to |
262 | * SPI slave device freq. |
263 | */ |
264 | static int davinci_spi_setup_transfer(struct spi_device *spi, |
265 | struct spi_transfer *t) |
266 | { |
267 | |
268 | struct davinci_spi *dspi; |
269 | struct davinci_spi_config *spicfg; |
270 | u8 bits_per_word = 0; |
271 | u32 hz = 0, spifmt = 0; |
272 | int prescale; |
273 | |
274 | dspi = spi_controller_get_devdata(ctlr: spi->controller); |
275 | spicfg = spi->controller_data; |
276 | if (!spicfg) |
277 | spicfg = &davinci_spi_default_cfg; |
278 | |
279 | if (t) { |
280 | bits_per_word = t->bits_per_word; |
281 | hz = t->speed_hz; |
282 | } |
283 | |
284 | /* if bits_per_word is not set then set it default */ |
285 | if (!bits_per_word) |
286 | bits_per_word = spi->bits_per_word; |
287 | |
288 | /* |
289 | * Assign function pointer to appropriate transfer method |
290 | * 8bit, 16bit or 32bit transfer |
291 | */ |
292 | if (bits_per_word <= 8) { |
293 | dspi->get_rx = davinci_spi_rx_buf_u8; |
294 | dspi->get_tx = davinci_spi_tx_buf_u8; |
295 | dspi->bytes_per_word[spi_get_chipselect(spi, idx: 0)] = 1; |
296 | } else { |
297 | dspi->get_rx = davinci_spi_rx_buf_u16; |
298 | dspi->get_tx = davinci_spi_tx_buf_u16; |
299 | dspi->bytes_per_word[spi_get_chipselect(spi, idx: 0)] = 2; |
300 | } |
301 | |
302 | if (!hz) |
303 | hz = spi->max_speed_hz; |
304 | |
305 | /* Set up SPIFMTn register, unique to this chipselect. */ |
306 | |
307 | prescale = davinci_spi_get_prescale(dspi, max_speed_hz: hz); |
308 | if (prescale < 0) |
309 | return prescale; |
310 | |
311 | spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f); |
312 | |
313 | if (spi->mode & SPI_LSB_FIRST) |
314 | spifmt |= SPIFMT_SHIFTDIR_MASK; |
315 | |
316 | if (spi->mode & SPI_CPOL) |
317 | spifmt |= SPIFMT_POLARITY_MASK; |
318 | |
319 | if (!(spi->mode & SPI_CPHA)) |
320 | spifmt |= SPIFMT_PHASE_MASK; |
321 | |
322 | /* |
323 | * Assume wdelay is used only on SPI peripherals that has this field |
324 | * in SPIFMTn register and when it's configured from board file or DT. |
325 | */ |
326 | if (spicfg->wdelay) |
327 | spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) |
328 | & SPIFMT_WDELAY_MASK); |
329 | |
330 | /* |
331 | * Version 1 hardware supports two basic SPI modes: |
332 | * - Standard SPI mode uses 4 pins, with chipselect |
333 | * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) |
334 | * (distinct from SPI_3WIRE, with just one data wire; |
335 | * or similar variants without MOSI or without MISO) |
336 | * |
337 | * Version 2 hardware supports an optional handshaking signal, |
338 | * so it can support two more modes: |
339 | * - 5 pin SPI variant is standard SPI plus SPI_READY |
340 | * - 4 pin with enable is (SPI_READY | SPI_NO_CS) |
341 | */ |
342 | |
343 | if (dspi->version == SPI_VERSION_2) { |
344 | |
345 | u32 delay = 0; |
346 | |
347 | if (spicfg->odd_parity) |
348 | spifmt |= SPIFMT_ODD_PARITY_MASK; |
349 | |
350 | if (spicfg->parity_enable) |
351 | spifmt |= SPIFMT_PARITYENA_MASK; |
352 | |
353 | if (spicfg->timer_disable) { |
354 | spifmt |= SPIFMT_DISTIMER_MASK; |
355 | } else { |
356 | delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT) |
357 | & SPIDELAY_C2TDELAY_MASK; |
358 | delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT) |
359 | & SPIDELAY_T2CDELAY_MASK; |
360 | } |
361 | |
362 | if (spi->mode & SPI_READY) { |
363 | spifmt |= SPIFMT_WAITENA_MASK; |
364 | delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT) |
365 | & SPIDELAY_T2EDELAY_MASK; |
366 | delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT) |
367 | & SPIDELAY_C2EDELAY_MASK; |
368 | } |
369 | |
370 | iowrite32(delay, dspi->base + SPIDELAY); |
371 | } |
372 | |
373 | iowrite32(spifmt, dspi->base + SPIFMT0); |
374 | |
375 | return 0; |
376 | } |
377 | |
378 | static int davinci_spi_of_setup(struct spi_device *spi) |
379 | { |
380 | struct davinci_spi_config *spicfg = spi->controller_data; |
381 | struct device_node *np = spi->dev.of_node; |
382 | struct davinci_spi *dspi = spi_controller_get_devdata(ctlr: spi->controller); |
383 | u32 prop; |
384 | |
385 | if (spicfg == NULL && np) { |
386 | spicfg = kzalloc(size: sizeof(*spicfg), GFP_KERNEL); |
387 | if (!spicfg) |
388 | return -ENOMEM; |
389 | *spicfg = davinci_spi_default_cfg; |
390 | /* override with dt configured values */ |
391 | if (!of_property_read_u32(np, propname: "ti,spi-wdelay" , out_value: &prop)) |
392 | spicfg->wdelay = (u8)prop; |
393 | spi->controller_data = spicfg; |
394 | |
395 | if (dspi->dma_rx && dspi->dma_tx) |
396 | spicfg->io_type = SPI_IO_TYPE_DMA; |
397 | } |
398 | |
399 | return 0; |
400 | } |
401 | |
402 | /** |
403 | * davinci_spi_setup - This functions will set default transfer method |
404 | * @spi: spi device on which data transfer to be done |
405 | * |
406 | * This functions sets the default transfer method. |
407 | */ |
408 | static int davinci_spi_setup(struct spi_device *spi) |
409 | { |
410 | struct davinci_spi *dspi; |
411 | struct device_node *np = spi->dev.of_node; |
412 | bool internal_cs = true; |
413 | |
414 | dspi = spi_controller_get_devdata(ctlr: spi->controller); |
415 | |
416 | if (!(spi->mode & SPI_NO_CS)) { |
417 | if (np && spi_get_csgpiod(spi, idx: 0)) |
418 | internal_cs = false; |
419 | |
420 | if (internal_cs) |
421 | set_io_bits(addr: dspi->base + SPIPC0, bits: 1 << spi_get_chipselect(spi, idx: 0)); |
422 | } |
423 | |
424 | if (spi->mode & SPI_READY) |
425 | set_io_bits(addr: dspi->base + SPIPC0, SPIPC0_SPIENA_MASK); |
426 | |
427 | if (spi->mode & SPI_LOOP) |
428 | set_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); |
429 | else |
430 | clear_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); |
431 | |
432 | return davinci_spi_of_setup(spi); |
433 | } |
434 | |
435 | static void davinci_spi_cleanup(struct spi_device *spi) |
436 | { |
437 | struct davinci_spi_config *spicfg = spi->controller_data; |
438 | |
439 | spi->controller_data = NULL; |
440 | if (spi->dev.of_node) |
441 | kfree(objp: spicfg); |
442 | } |
443 | |
444 | static bool davinci_spi_can_dma(struct spi_controller *host, |
445 | struct spi_device *spi, |
446 | struct spi_transfer *xfer) |
447 | { |
448 | struct davinci_spi_config *spicfg = spi->controller_data; |
449 | bool can_dma = false; |
450 | |
451 | if (spicfg) |
452 | can_dma = (spicfg->io_type == SPI_IO_TYPE_DMA) && |
453 | (xfer->len >= DMA_MIN_BYTES) && |
454 | !is_vmalloc_addr(x: xfer->rx_buf) && |
455 | !is_vmalloc_addr(x: xfer->tx_buf); |
456 | |
457 | return can_dma; |
458 | } |
459 | |
/*
 * davinci_spi_check_error - translate SPIFLG error bits into an errno
 * @dspi: the controller data
 * @int_status: SPIFLG register contents
 *
 * The first matching flag determines the return value; the DLEN, parity,
 * overrun and buffer-init flags are only checked on version 2 hardware.
 * Returns 0 when no recognized error bit is set.
 */
static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
	struct device *sdev = dspi->bitbang.ctlr->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_err(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_err(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_err(sdev, "SPI Bit error\n");
		return -EIO;
	}

	/* These flags only exist on version 2 of the IP */
	if (dspi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_err(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_err(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_err(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_err(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}
498 | |
499 | /** |
500 | * davinci_spi_process_events - check for and handle any SPI controller events |
501 | * @dspi: the controller data |
502 | * |
503 | * This function will check the SPIFLG register and handle any events that are |
504 | * detected there |
505 | */ |
506 | static int davinci_spi_process_events(struct davinci_spi *dspi) |
507 | { |
508 | u32 buf, status, errors = 0, spidat1; |
509 | |
510 | buf = ioread32(dspi->base + SPIBUF); |
511 | |
512 | if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) { |
513 | dspi->get_rx(buf & 0xFFFF, dspi); |
514 | dspi->rcount--; |
515 | } |
516 | |
517 | status = ioread32(dspi->base + SPIFLG); |
518 | |
519 | if (unlikely(status & SPIFLG_ERROR_MASK)) { |
520 | errors = status & SPIFLG_ERROR_MASK; |
521 | goto out; |
522 | } |
523 | |
524 | if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) { |
525 | spidat1 = ioread32(dspi->base + SPIDAT1); |
526 | dspi->wcount--; |
527 | spidat1 &= ~0xFFFF; |
528 | spidat1 |= 0xFFFF & dspi->get_tx(dspi); |
529 | iowrite32(spidat1, dspi->base + SPIDAT1); |
530 | } |
531 | |
532 | out: |
533 | return errors; |
534 | } |
535 | |
536 | static void davinci_spi_dma_rx_callback(void *data) |
537 | { |
538 | struct davinci_spi *dspi = (struct davinci_spi *)data; |
539 | |
540 | dspi->rcount = 0; |
541 | |
542 | if (!dspi->wcount && !dspi->rcount) |
543 | complete(&dspi->done); |
544 | } |
545 | |
546 | static void davinci_spi_dma_tx_callback(void *data) |
547 | { |
548 | struct davinci_spi *dspi = (struct davinci_spi *)data; |
549 | |
550 | dspi->wcount = 0; |
551 | |
552 | if (!dspi->wcount && !dspi->rcount) |
553 | complete(&dspi->done); |
554 | } |
555 | |
556 | /** |
557 | * davinci_spi_bufs - functions which will handle transfer data |
558 | * @spi: spi device on which data transfer to be done |
559 | * @t: spi transfer in which transfer info is filled |
560 | * |
561 | * This function will put data to be transferred into data register |
562 | * of SPI controller and then wait until the completion will be marked |
563 | * by the IRQ Handler. |
564 | */ |
565 | static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) |
566 | { |
567 | struct davinci_spi *dspi; |
568 | int data_type, ret = -ENOMEM; |
569 | u32 tx_data, spidat1; |
570 | u32 errors = 0; |
571 | struct davinci_spi_config *spicfg; |
572 | struct davinci_spi_platform_data *pdata; |
573 | |
574 | dspi = spi_controller_get_devdata(ctlr: spi->controller); |
575 | pdata = &dspi->pdata; |
576 | spicfg = (struct davinci_spi_config *)spi->controller_data; |
577 | if (!spicfg) |
578 | spicfg = &davinci_spi_default_cfg; |
579 | |
580 | /* convert len to words based on bits_per_word */ |
581 | data_type = dspi->bytes_per_word[spi_get_chipselect(spi, idx: 0)]; |
582 | |
583 | dspi->tx = t->tx_buf; |
584 | dspi->rx = t->rx_buf; |
585 | dspi->wcount = t->len / data_type; |
586 | dspi->rcount = dspi->wcount; |
587 | |
588 | spidat1 = ioread32(dspi->base + SPIDAT1); |
589 | |
590 | clear_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); |
591 | set_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); |
592 | |
593 | reinit_completion(x: &dspi->done); |
594 | |
595 | if (!davinci_spi_can_dma(host: spi->controller, spi, xfer: t)) { |
596 | if (spicfg->io_type != SPI_IO_TYPE_POLL) |
597 | set_io_bits(addr: dspi->base + SPIINT, SPIINT_MASKINT); |
598 | /* start the transfer */ |
599 | dspi->wcount--; |
600 | tx_data = dspi->get_tx(dspi); |
601 | spidat1 &= 0xFFFF0000; |
602 | spidat1 |= tx_data & 0xFFFF; |
603 | iowrite32(spidat1, dspi->base + SPIDAT1); |
604 | } else { |
605 | struct dma_slave_config dma_rx_conf = { |
606 | .direction = DMA_DEV_TO_MEM, |
607 | .src_addr = (unsigned long)dspi->pbase + SPIBUF, |
608 | .src_addr_width = data_type, |
609 | .src_maxburst = 1, |
610 | }; |
611 | struct dma_slave_config dma_tx_conf = { |
612 | .direction = DMA_MEM_TO_DEV, |
613 | .dst_addr = (unsigned long)dspi->pbase + SPIDAT1, |
614 | .dst_addr_width = data_type, |
615 | .dst_maxburst = 1, |
616 | }; |
617 | struct dma_async_tx_descriptor *rxdesc; |
618 | struct dma_async_tx_descriptor *txdesc; |
619 | |
620 | dmaengine_slave_config(chan: dspi->dma_rx, config: &dma_rx_conf); |
621 | dmaengine_slave_config(chan: dspi->dma_tx, config: &dma_tx_conf); |
622 | |
623 | rxdesc = dmaengine_prep_slave_sg(chan: dspi->dma_rx, |
624 | sgl: t->rx_sg.sgl, sg_len: t->rx_sg.nents, dir: DMA_DEV_TO_MEM, |
625 | flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
626 | if (!rxdesc) |
627 | goto err_desc; |
628 | |
629 | if (!t->tx_buf) { |
630 | /* To avoid errors when doing rx-only transfers with |
631 | * many SG entries (> 20), use the rx buffer as the |
632 | * dummy tx buffer so that dma reloads are done at the |
633 | * same time for rx and tx. |
634 | */ |
635 | t->tx_sg.sgl = t->rx_sg.sgl; |
636 | t->tx_sg.nents = t->rx_sg.nents; |
637 | } |
638 | |
639 | txdesc = dmaengine_prep_slave_sg(chan: dspi->dma_tx, |
640 | sgl: t->tx_sg.sgl, sg_len: t->tx_sg.nents, dir: DMA_MEM_TO_DEV, |
641 | flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
642 | if (!txdesc) |
643 | goto err_desc; |
644 | |
645 | rxdesc->callback = davinci_spi_dma_rx_callback; |
646 | rxdesc->callback_param = (void *)dspi; |
647 | txdesc->callback = davinci_spi_dma_tx_callback; |
648 | txdesc->callback_param = (void *)dspi; |
649 | |
650 | if (pdata->cshold_bug) |
651 | iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); |
652 | |
653 | dmaengine_submit(desc: rxdesc); |
654 | dmaengine_submit(desc: txdesc); |
655 | |
656 | dma_async_issue_pending(chan: dspi->dma_rx); |
657 | dma_async_issue_pending(chan: dspi->dma_tx); |
658 | |
659 | set_io_bits(addr: dspi->base + SPIINT, SPIINT_DMA_REQ_EN); |
660 | } |
661 | |
662 | /* Wait for the transfer to complete */ |
663 | if (spicfg->io_type != SPI_IO_TYPE_POLL) { |
664 | if (wait_for_completion_timeout(x: &dspi->done, HZ) == 0) |
665 | errors = SPIFLG_TIMEOUT_MASK; |
666 | } else { |
667 | while (dspi->rcount > 0 || dspi->wcount > 0) { |
668 | errors = davinci_spi_process_events(dspi); |
669 | if (errors) |
670 | break; |
671 | cpu_relax(); |
672 | } |
673 | } |
674 | |
675 | clear_io_bits(addr: dspi->base + SPIINT, SPIINT_MASKALL); |
676 | if (davinci_spi_can_dma(host: spi->controller, spi, xfer: t)) |
677 | clear_io_bits(addr: dspi->base + SPIINT, SPIINT_DMA_REQ_EN); |
678 | |
679 | clear_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); |
680 | set_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); |
681 | |
682 | /* |
683 | * Check for bit error, desync error,parity error,timeout error and |
684 | * receive overflow errors |
685 | */ |
686 | if (errors) { |
687 | ret = davinci_spi_check_error(dspi, int_status: errors); |
688 | WARN(!ret, "%s: error reported but no error found!\n" , |
689 | dev_name(&spi->dev)); |
690 | return ret; |
691 | } |
692 | |
693 | if (dspi->rcount != 0 || dspi->wcount != 0) { |
694 | dev_err(&spi->dev, "SPI data transfer error\n" ); |
695 | return -EIO; |
696 | } |
697 | |
698 | return t->len; |
699 | |
700 | err_desc: |
701 | return ret; |
702 | } |
703 | |
704 | /** |
705 | * dummy_thread_fn - dummy thread function |
706 | * @irq: IRQ number for this SPI Master |
707 | * @data: structure for SPI Master controller davinci_spi |
708 | * |
709 | * This is to satisfy the request_threaded_irq() API so that the irq |
710 | * handler is called in interrupt context. |
711 | */ |
712 | static irqreturn_t dummy_thread_fn(s32 irq, void *data) |
713 | { |
714 | return IRQ_HANDLED; |
715 | } |
716 | |
717 | /** |
718 | * davinci_spi_irq - Interrupt handler for SPI Master Controller |
719 | * @irq: IRQ number for this SPI Master |
720 | * @data: structure for SPI Master controller davinci_spi |
721 | * |
722 | * ISR will determine that interrupt arrives either for READ or WRITE command. |
723 | * According to command it will do the appropriate action. It will check |
724 | * transfer length and if it is not zero then dispatch transfer command again. |
725 | * If transfer length is zero then it will indicate the COMPLETION so that |
726 | * davinci_spi_bufs function can go ahead. |
727 | */ |
728 | static irqreturn_t davinci_spi_irq(s32 irq, void *data) |
729 | { |
730 | struct davinci_spi *dspi = data; |
731 | int status; |
732 | |
733 | status = davinci_spi_process_events(dspi); |
734 | if (unlikely(status != 0)) |
735 | clear_io_bits(addr: dspi->base + SPIINT, SPIINT_MASKINT); |
736 | |
737 | if ((!dspi->rcount && !dspi->wcount) || status) |
738 | complete(&dspi->done); |
739 | |
740 | return IRQ_HANDLED; |
741 | } |
742 | |
743 | static int davinci_spi_request_dma(struct davinci_spi *dspi) |
744 | { |
745 | struct device *sdev = dspi->bitbang.ctlr->dev.parent; |
746 | |
747 | dspi->dma_rx = dma_request_chan(dev: sdev, name: "rx" ); |
748 | if (IS_ERR(ptr: dspi->dma_rx)) |
749 | return PTR_ERR(ptr: dspi->dma_rx); |
750 | |
751 | dspi->dma_tx = dma_request_chan(dev: sdev, name: "tx" ); |
752 | if (IS_ERR(ptr: dspi->dma_tx)) { |
753 | dma_release_channel(chan: dspi->dma_rx); |
754 | return PTR_ERR(ptr: dspi->dma_tx); |
755 | } |
756 | |
757 | return 0; |
758 | } |
759 | |
760 | #if defined(CONFIG_OF) |
761 | |
/* OF SPI data structure */
struct davinci_spi_of_data {
	u8 version;		/* IP revision: SPI_VERSION_1 or SPI_VERSION_2 */
	u8 prescaler_limit;	/* smallest prescaler value this IP accepts */
};

static const struct davinci_spi_of_data dm6441_spi_data = {
	.version = SPI_VERSION_1,
	.prescaler_limit = 2,
};

static const struct davinci_spi_of_data da830_spi_data = {
	.version = SPI_VERSION_2,
	.prescaler_limit = 2,
};

static const struct davinci_spi_of_data keystone_spi_data = {
	.version = SPI_VERSION_1,
	.prescaler_limit = 0,
};

/* Device-tree compatibles mapped to per-SoC configuration data. */
static const struct of_device_id davinci_spi_of_match[] = {
	{
		.compatible = "ti,dm6441-spi",
		.data = &dm6441_spi_data,
	},
	{
		.compatible = "ti,da830-spi",
		.data = &da830_spi_data,
	},
	{
		.compatible = "ti,keystone-spi",
		.data = &keystone_spi_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
799 | |
800 | /** |
801 | * spi_davinci_get_pdata - Get platform data from DTS binding |
802 | * @pdev: ptr to platform data |
803 | * @dspi: ptr to driver data |
804 | * |
805 | * Parses and populates pdata in dspi from device tree bindings. |
806 | * |
807 | * NOTE: Not all platform data params are supported currently. |
808 | */ |
809 | static int spi_davinci_get_pdata(struct platform_device *pdev, |
810 | struct davinci_spi *dspi) |
811 | { |
812 | struct device_node *node = pdev->dev.of_node; |
813 | const struct davinci_spi_of_data *spi_data; |
814 | struct davinci_spi_platform_data *pdata; |
815 | unsigned int num_cs, intr_line = 0; |
816 | |
817 | pdata = &dspi->pdata; |
818 | |
819 | spi_data = device_get_match_data(dev: &pdev->dev); |
820 | |
821 | pdata->version = spi_data->version; |
822 | pdata->prescaler_limit = spi_data->prescaler_limit; |
823 | /* |
824 | * default num_cs is 1 and all chipsel are internal to the chip |
825 | * indicated by chip_sel being NULL or cs_gpios being NULL or |
826 | * set to -ENOENT. num-cs includes internal as well as gpios. |
827 | * indicated by chip_sel being NULL. GPIO based CS is not |
828 | * supported yet in DT bindings. |
829 | */ |
830 | num_cs = 1; |
831 | of_property_read_u32(np: node, propname: "num-cs" , out_value: &num_cs); |
832 | pdata->num_chipselect = num_cs; |
833 | of_property_read_u32(np: node, propname: "ti,davinci-spi-intr-line" , out_value: &intr_line); |
834 | pdata->intr_line = intr_line; |
835 | return 0; |
836 | } |
837 | #else |
/* Stub for !CONFIG_OF builds: DT configuration cannot be obtained. */
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	return -ENODEV;
}
843 | #endif |
844 | |
845 | /** |
846 | * davinci_spi_probe - probe function for SPI Master Controller |
847 | * @pdev: platform_device structure which contains plateform specific data |
848 | * |
849 | * According to Linux Device Model this function will be invoked by Linux |
850 | * with platform_device struct which contains the device specific info. |
851 | * This function will map the SPI controller's memory, register IRQ, |
852 | * Reset SPI controller and setting its registers to default value. |
853 | * It will invoke spi_bitbang_start to create work queue so that client driver |
854 | * can register transfer method to work queue. |
855 | */ |
856 | static int davinci_spi_probe(struct platform_device *pdev) |
857 | { |
858 | struct spi_controller *host; |
859 | struct davinci_spi *dspi; |
860 | struct davinci_spi_platform_data *pdata; |
861 | struct resource *r; |
862 | int ret = 0; |
863 | u32 spipc0; |
864 | |
865 | host = spi_alloc_host(dev: &pdev->dev, size: sizeof(struct davinci_spi)); |
866 | if (host == NULL) { |
867 | ret = -ENOMEM; |
868 | goto err; |
869 | } |
870 | |
871 | platform_set_drvdata(pdev, data: host); |
872 | |
873 | dspi = spi_controller_get_devdata(ctlr: host); |
874 | |
875 | if (dev_get_platdata(dev: &pdev->dev)) { |
876 | pdata = dev_get_platdata(dev: &pdev->dev); |
877 | dspi->pdata = *pdata; |
878 | } else { |
879 | /* update dspi pdata with that from the DT */ |
880 | ret = spi_davinci_get_pdata(pdev, dspi); |
881 | if (ret < 0) |
882 | goto free_host; |
883 | } |
884 | |
885 | /* pdata in dspi is now updated and point pdata to that */ |
886 | pdata = &dspi->pdata; |
887 | |
888 | dspi->bytes_per_word = devm_kcalloc(dev: &pdev->dev, |
889 | n: pdata->num_chipselect, |
890 | size: sizeof(*dspi->bytes_per_word), |
891 | GFP_KERNEL); |
892 | if (dspi->bytes_per_word == NULL) { |
893 | ret = -ENOMEM; |
894 | goto free_host; |
895 | } |
896 | |
897 | dspi->base = devm_platform_get_and_ioremap_resource(pdev, index: 0, res: &r); |
898 | if (IS_ERR(ptr: dspi->base)) { |
899 | ret = PTR_ERR(ptr: dspi->base); |
900 | goto free_host; |
901 | } |
902 | dspi->pbase = r->start; |
903 | |
904 | init_completion(x: &dspi->done); |
905 | |
906 | ret = platform_get_irq(pdev, 0); |
907 | if (ret < 0) |
908 | goto free_host; |
909 | dspi->irq = ret; |
910 | |
911 | ret = devm_request_threaded_irq(dev: &pdev->dev, irq: dspi->irq, handler: davinci_spi_irq, |
912 | thread_fn: dummy_thread_fn, irqflags: 0, devname: dev_name(dev: &pdev->dev), dev_id: dspi); |
913 | if (ret) |
914 | goto free_host; |
915 | |
916 | dspi->bitbang.ctlr = host; |
917 | |
918 | dspi->clk = devm_clk_get_enabled(dev: &pdev->dev, NULL); |
919 | if (IS_ERR(ptr: dspi->clk)) { |
920 | ret = -ENODEV; |
921 | goto free_host; |
922 | } |
923 | |
924 | host->use_gpio_descriptors = true; |
925 | host->dev.of_node = pdev->dev.of_node; |
926 | host->bus_num = pdev->id; |
927 | host->num_chipselect = pdata->num_chipselect; |
928 | host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16); |
929 | host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_GPIO_SS; |
930 | host->setup = davinci_spi_setup; |
931 | host->cleanup = davinci_spi_cleanup; |
932 | host->can_dma = davinci_spi_can_dma; |
933 | |
934 | dspi->bitbang.chipselect = davinci_spi_chipselect; |
935 | dspi->bitbang.setup_transfer = davinci_spi_setup_transfer; |
936 | dspi->prescaler_limit = pdata->prescaler_limit; |
937 | dspi->version = pdata->version; |
938 | |
939 | dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_WORD; |
940 | if (dspi->version == SPI_VERSION_2) |
941 | dspi->bitbang.flags |= SPI_READY; |
942 | |
943 | dspi->bitbang.txrx_bufs = davinci_spi_bufs; |
944 | |
945 | ret = davinci_spi_request_dma(dspi); |
946 | if (ret == -EPROBE_DEFER) { |
947 | goto free_host; |
948 | } else if (ret) { |
949 | dev_info(&pdev->dev, "DMA is not supported (%d)\n" , ret); |
950 | dspi->dma_rx = NULL; |
951 | dspi->dma_tx = NULL; |
952 | } |
953 | |
954 | dspi->get_rx = davinci_spi_rx_buf_u8; |
955 | dspi->get_tx = davinci_spi_tx_buf_u8; |
956 | |
957 | /* Reset In/OUT SPI module */ |
958 | iowrite32(0, dspi->base + SPIGCR0); |
959 | udelay(100); |
960 | iowrite32(1, dspi->base + SPIGCR0); |
961 | |
962 | /* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */ |
963 | spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK; |
964 | iowrite32(spipc0, dspi->base + SPIPC0); |
965 | |
966 | if (pdata->intr_line) |
967 | iowrite32(SPI_INTLVL_1, dspi->base + SPILVL); |
968 | else |
969 | iowrite32(SPI_INTLVL_0, dspi->base + SPILVL); |
970 | |
971 | iowrite32(CS_DEFAULT, dspi->base + SPIDEF); |
972 | |
973 | /* host mode default */ |
974 | set_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK); |
975 | set_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK); |
976 | set_io_bits(addr: dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); |
977 | |
978 | ret = spi_bitbang_start(spi: &dspi->bitbang); |
979 | if (ret) |
980 | goto free_dma; |
981 | |
982 | dev_info(&pdev->dev, "Controller at 0x%p\n" , dspi->base); |
983 | |
984 | return ret; |
985 | |
986 | free_dma: |
987 | if (dspi->dma_rx) { |
988 | dma_release_channel(chan: dspi->dma_rx); |
989 | dma_release_channel(chan: dspi->dma_tx); |
990 | } |
991 | free_host: |
992 | spi_controller_put(ctlr: host); |
993 | err: |
994 | return ret; |
995 | } |
996 | |
997 | /** |
998 | * davinci_spi_remove - remove function for SPI Master Controller |
999 | * @pdev: platform_device structure which contains plateform specific data |
1000 | * |
1001 | * This function will do the reverse action of davinci_spi_probe function |
1002 | * It will free the IRQ and SPI controller's memory region. |
1003 | * It will also call spi_bitbang_stop to destroy the work queue which was |
1004 | * created by spi_bitbang_start. |
1005 | */ |
1006 | static void davinci_spi_remove(struct platform_device *pdev) |
1007 | { |
1008 | struct davinci_spi *dspi; |
1009 | struct spi_controller *host; |
1010 | |
1011 | host = platform_get_drvdata(pdev); |
1012 | dspi = spi_controller_get_devdata(ctlr: host); |
1013 | |
1014 | spi_bitbang_stop(spi: &dspi->bitbang); |
1015 | |
1016 | if (dspi->dma_rx) { |
1017 | dma_release_channel(chan: dspi->dma_rx); |
1018 | dma_release_channel(chan: dspi->dma_tx); |
1019 | } |
1020 | |
1021 | spi_controller_put(ctlr: host); |
1022 | } |
1023 | |
/* Platform driver glue; OF matching is compiled in only with CONFIG_OF. */
static struct platform_driver davinci_spi_driver = {
	.driver = {
		.name = "spi_davinci",
		.of_match_table = of_match_ptr(davinci_spi_of_match),
	},
	.probe = davinci_spi_probe,
	.remove_new = davinci_spi_remove,
};
1032 | module_platform_driver(davinci_spi_driver); |
1033 | |
1034 | MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver" ); |
1035 | MODULE_LICENSE("GPL" ); |
1036 | |