// SPDX-License-Identifier: GPL-2.0
/*
 * QMC driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <soc/fsl/qe/qmc.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"

/* SCC general mode register low (32 bits) */
#define SCC_GSMRL	0x00
#define SCC_GSMRL_ENR		(1 << 5)
#define SCC_GSMRL_ENT		(1 << 4)
#define SCC_GSMRL_MODE_QMC	(0x0A << 0)

/* SCC general mode register high (32 bits) */
#define SCC_GSMRH	0x04
#define SCC_GSMRH_CTSS	(1 << 7)
#define SCC_GSMRH_CDS	(1 << 8)
#define SCC_GSMRH_CTSP	(1 << 9)
#define SCC_GSMRH_CDP	(1 << 10)

/* SCC event register (16 bits) */
#define SCC_SCCE	0x10
#define SCC_SCCE_IQOV	(1 << 3)
#define SCC_SCCE_GINT	(1 << 2)
#define SCC_SCCE_GUN	(1 << 1)
#define SCC_SCCE_GOV	(1 << 0)

/* SCC mask register (16 bits) */
#define SCC_SCCM	0x14

/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE		0x00
/* Multichannel controller state (16 bits) */
#define QMC_GBL_QMCSTATE	0x04
/* Maximum receive buffer length (16 bits) */
#define QMC_GBL_MRBLR		0x06
/* Tx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_TX_S_PTR	0x08
/* Rx pointer (16 bits) */
#define QMC_GBL_RXPTR		0x0A
/* Global receive frame threshold (16 bits) */
#define QMC_GBL_GRFTHR		0x0C
/* Global receive frame count (16 bits) */
#define QMC_GBL_GRFCNT		0x0E
/* Multichannel interrupt base address (32 bits) */
#define QMC_GBL_INTBASE		0x10
/* Multichannel interrupt pointer (32 bits) */
#define QMC_GBL_INTPTR		0x14
/* Rx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_RX_S_PTR	0x18
/* Tx pointer (16 bits) */
#define QMC_GBL_TXPTR		0x1A
/* CRC constant (32 bits) */
#define QMC_GBL_C_MASK32	0x1C
/* Time slot assignment table Rx (32 x 16 bits) */
#define QMC_GBL_TSATRX		0x20
/* Time slot assignment table Tx (32 x 16 bits) */
#define QMC_GBL_TSATTX		0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16	0xA0

/* TSA entry (16-bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID		(1 << 15)
#define QMC_TSA_WRAP		(1 << 14)
#define QMC_TSA_MASK		(0x303F)
#define QMC_TSA_CHANNEL(x)	((x) << 6)

/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE	0x00

/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR	0x02
#define QMC_SPE_CHAMR_MODE_HDLC		(1 << 15)
#define QMC_SPE_CHAMR_MODE_TRANSP	((0 << 15) | (1 << 13))
#define QMC_SPE_CHAMR_ENT		(1 << 12)
#define QMC_SPE_CHAMR_POL		(1 << 8)
#define QMC_SPE_CHAMR_HDLC_IDLM		(1 << 13)
#define QMC_SPE_CHAMR_HDLC_CRC		(1 << 7)
#define QMC_SPE_CHAMR_HDLC_NOF		(0x0f << 0)
#define QMC_SPE_CHAMR_TRANSP_RD		(1 << 14)
#define QMC_SPE_CHAMR_TRANSP_SYNC	(1 << 10)

/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE	0x04
/* Tx buffer descriptor pointer (16 bits) */
#define QMC_SPE_TBPTR	0x0C
/* Zero-insertion state (32 bits) */
#define QMC_SPE_ZISTATE	0x14
/* Channel's interrupt mask flags (16 bits) */
#define QMC_SPE_INTMSK	0x1C
/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_RBASE	0x20
/* HDLC: Maximum frame length register (16 bits) */
#define QMC_SPE_MFLR	0x22
/* TRANSPARENT: Transparent maximum receive length (16 bits) */
#define QMC_SPE_TMRBLR	0x22
/* Rx internal state (32 bits) */
#define QMC_SPE_RSTATE	0x24
/* Rx buffer descriptor pointer (16 bits) */
#define QMC_SPE_RBPTR	0x2C
/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
#define QMC_SPE_RPACK	0x30
/* Zero deletion state (32 bits) */
#define QMC_SPE_ZDSTATE	0x34

/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC		0x3C
#define QMC_SPE_TRNSYNC_RX(x)	((x) << 8)
#define QMC_SPE_TRNSYNC_TX(x)	((x) << 0)

/* Interrupt related registers bits */
#define QMC_INT_V		(1 << 15)
#define QMC_INT_W		(1 << 14)
#define QMC_INT_NID		(1 << 13)
#define QMC_INT_IDL		(1 << 12)
#define QMC_INT_GET_CHANNEL(x)	(((x) & 0x0FC0) >> 6)
#define QMC_INT_MRF		(1 << 5)
#define QMC_INT_UN		(1 << 4)
#define QMC_INT_RXF		(1 << 3)
#define QMC_INT_BSY		(1 << 2)
#define QMC_INT_TXB		(1 << 1)
#define QMC_INT_RXB		(1 << 0)

/* BD related registers bits */
#define QMC_BD_RX_E	(1 << 15)
#define QMC_BD_RX_W	(1 << 13)
#define QMC_BD_RX_I	(1 << 12)
#define QMC_BD_RX_L	(1 << 11)
#define QMC_BD_RX_F	(1 << 10)
#define QMC_BD_RX_CM	(1 << 9)
#define QMC_BD_RX_UB	(1 << 7)
#define QMC_BD_RX_LG	(1 << 5)
#define QMC_BD_RX_NO	(1 << 4)
#define QMC_BD_RX_AB	(1 << 3)
#define QMC_BD_RX_CR	(1 << 2)

#define QMC_BD_TX_R	(1 << 15)
#define QMC_BD_TX_W	(1 << 13)
#define QMC_BD_TX_I	(1 << 12)
#define QMC_BD_TX_L	(1 << 11)
#define QMC_BD_TX_TC	(1 << 10)
#define QMC_BD_TX_CM	(1 << 9)
#define QMC_BD_TX_UB	(1 << 7)
#define QMC_BD_TX_PAD	(0x0f << 0)

/* Numbers of BDs and interrupt items */
#define QMC_NB_TXBDS	8
#define QMC_NB_RXBDS	8
#define QMC_NB_INTS	128

struct qmc_xfer_desc {
	union {
		void (*tx_complete)(void *context);
		void (*rx_complete)(void *context, size_t length, unsigned int flags);
	};
	void *context;
};

struct qmc_chan {
	struct list_head list;
	unsigned int id;
	struct qmc *qmc;
	void __iomem *s_param;
	enum qmc_mode mode;
	spinlock_t ts_lock; /* Protect timeslots */
	u64 tx_ts_mask_avail;
	u64 tx_ts_mask;
	u64 rx_ts_mask_avail;
	u64 rx_ts_mask;
	bool is_reverse_data;

	spinlock_t tx_lock;
	cbd_t __iomem *txbds;
	cbd_t __iomem *txbd_free;
	cbd_t __iomem *txbd_done;
	struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
	u64 nb_tx_underrun;
	bool is_tx_stopped;

	spinlock_t rx_lock;
	cbd_t __iomem *rxbds;
	cbd_t __iomem *rxbd_free;
	cbd_t __iomem *rxbd_done;
	struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
	u64 nb_rx_busy;
	int rx_pending;
	bool is_rx_halted;
	bool is_rx_stopped;
};

struct qmc {
	struct device *dev;
	struct tsa_serial *tsa_serial;
	void __iomem *scc_regs;
	void __iomem *scc_pram;
	void __iomem *dpram;
	u16 scc_pram_offset;
	cbd_t __iomem *bd_table;
	dma_addr_t bd_dma_addr;
	size_t bd_size;
	u16 __iomem *int_table;
	u16 __iomem *int_curr;
	dma_addr_t int_dma_addr;
	size_t int_size;
	bool is_tsa_64rxtx;
	struct list_head chan_head;
	struct qmc_chan *chans[64];
};

static void qmc_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}

static u16 qmc_read16(void __iomem *addr)
{
	return ioread16be(addr);
}

static void qmc_setbits16(void __iomem *addr, u16 set)
{
	qmc_write16(addr, qmc_read16(addr) | set);
}

static void qmc_clrbits16(void __iomem *addr, u16 clr)
{
	qmc_write16(addr, qmc_read16(addr) & ~clr);
}

static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
{
	qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
}

static void qmc_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static u32 qmc_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static void qmc_setbits32(void __iomem *addr, u32 set)
{
	qmc_write32(addr, qmc_read32(addr) | set);
}

int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
	struct tsa_serial_info tsa_info;
	unsigned long flags;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
	if (ret)
		return ret;

	spin_lock_irqsave(&chan->ts_lock, flags);

	info->mode = chan->mode;
	info->rx_fs_rate = tsa_info.rx_fs_rate;
	info->rx_bit_rate = tsa_info.rx_bit_rate;
	info->nb_tx_ts = hweight64(chan->tx_ts_mask);
	info->tx_fs_rate = tsa_info.tx_fs_rate;
	info->tx_bit_rate = tsa_info.tx_bit_rate;
	info->nb_rx_ts = hweight64(chan->rx_ts_mask);

	spin_unlock_irqrestore(&chan->ts_lock, flags);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);

int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->ts_lock, flags);

	ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
	ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
	ts_info->rx_ts_mask = chan->rx_ts_mask;
	ts_info->tx_ts_mask = chan->tx_ts_mask;

	spin_unlock_irqrestore(&chan->ts_lock, flags);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_ts_info);

int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
{
	unsigned long flags;
	int ret;

	/* Only a subset of available timeslots is allowed */
	if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
		return -EINVAL;
	if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
		return -EINVAL;

	/* In case of common rx/tx table, rx/tx masks must be identical */
	if (chan->qmc->is_tsa_64rxtx) {
		if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
			return -EINVAL;
	}

	spin_lock_irqsave(&chan->ts_lock, flags);

	if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
	    (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
		dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
		ret = -EBUSY;
	} else {
		chan->tx_ts_mask = ts_info->tx_ts_mask;
		chan->rx_ts_mask = ts_info->rx_ts_mask;
		ret = 0;
	}
	spin_unlock_irqrestore(&chan->ts_lock, flags);

	return ret;
}
EXPORT_SYMBOL(qmc_chan_set_ts_info);

int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
	if (param->mode != chan->mode)
		return -EINVAL;

	switch (param->mode) {
	case QMC_HDLC:
		if ((param->hdlc.max_rx_buf_size % 4) ||
		    (param->hdlc.max_rx_buf_size < 8))
			return -EINVAL;

		qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
			    param->hdlc.max_rx_buf_size - 8);
		qmc_write16(chan->s_param + QMC_SPE_MFLR,
			    param->hdlc.max_rx_frame_size);
		if (param->hdlc.is_crc32) {
			qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		} else {
			qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		}
		break;

	case QMC_TRANSPARENT:
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
			    param->transp.max_rx_buf_size);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_set_param);

int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			  void (*complete)(void *context), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * R bit  UB bit
	 *   0      0   : The BD is free
	 *   1      1   : The BD is in use, waiting for transfer
	 *   0      1   : The BD is in use, waiting for completion
	 *   1      0   : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, length);
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->tx_desc[bd - chan->txbds];
	xfer_desc->tx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
	wmb(); /* Be sure to flush the descriptor before control update */
	qmc_write16(&bd->cbd_sc, ctrl);

	if (!chan->is_tx_stopped)
		qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	if (ctrl & QMC_BD_TX_W)
		chan->txbd_free = chan->txbds;
	else
		chan->txbd_free++;

	ret = 0;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_write_submit);
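
/*
 * Example usage (a sketch, not taken from an in-tree consumer):
 * 'my_tx_complete', 'priv', 'dma_addr' and 'len' are hypothetical names
 * for a client that obtained 'chan' via qmc_chan_get_byphandle() and
 * DMA-mapped a buffer beforehand:
 *
 *	static void my_tx_complete(void *context)
 *	{
 *		struct my_priv *priv = context;
 *
 *		// The buffer was transmitted; it can be unmapped or reused.
 *	}
 *	...
 *	ret = qmc_chan_write_submit(priv->chan, priv->dma_addr, priv->len,
 *				    my_tx_complete, priv);
 *	if (ret == -EBUSY)
 *		...	// all QMC_NB_TXBDS Tx BDs in flight, retry after a completion
 */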

static void qmc_chan_write_done(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	void (*complete)(void *context);
	unsigned long flags;
	void *context;
	cbd_t __iomem *bd;
	u16 ctrl;

	/*
	 * R bit  UB bit
	 *   0      0   : The BD is free
	 *   1      1   : The BD is in use, waiting for transfer
	 *   0      1   : The BD is in use, waiting for completion
	 *   1      0   : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_TX_R)) {
		if (!(ctrl & QMC_BD_TX_UB))
			goto end;

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		complete = xfer_desc->tx_complete;
		context = xfer_desc->context;
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);

		if (ctrl & QMC_BD_TX_W)
			chan->txbd_done = chan->txbds;
		else
			chan->txbd_done++;

		if (complete) {
			spin_unlock_irqrestore(&chan->tx_lock, flags);
			complete(context);
			spin_lock_irqsave(&chan->tx_lock, flags);
		}

		bd = chan->txbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			 void (*complete)(void *context, size_t length, unsigned int flags),
			 void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * E bit  UB bit
	 *   0      0   : The BD is free
	 *   1      1   : The BD is in use, waiting for transfer
	 *   0      1   : The BD is in use, waiting for completion
	 *   1      0   : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->rx_desc[bd - chan->rxbds];
	xfer_desc->rx_complete = complete;
	xfer_desc->context = context;

	/* Clear previous status flags */
	ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
		  QMC_BD_RX_AB | QMC_BD_RX_CR);

	/* Activate the descriptor */
	ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
	wmb(); /* Be sure to flush data before descriptor activation */
	qmc_write16(&bd->cbd_sc, ctrl);

	/* Restart receiver if needed */
	if (chan->is_rx_halted && !chan->is_rx_stopped) {
		/* Restart receiver */
		if (chan->mode == QMC_TRANSPARENT)
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		else
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
		chan->is_rx_halted = false;
	}
	chan->rx_pending++;

	if (ctrl & QMC_BD_RX_W)
		chan->rxbd_free = chan->rxbds;
	else
		chan->rxbd_free++;

	ret = 0;
end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_read_submit);
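
/*
 * Example usage (a sketch; 'my_rx_complete' and 'priv' are hypothetical
 * names): the submitted buffer is owned by the QMC until the completion
 * callback runs, where 'flags' carries the QMC_RX_FLAG_HDLC_* status bits:
 *
 *	static void my_rx_complete(void *context, size_t length,
 *				   unsigned int flags)
 *	{
 *		struct my_priv *priv = context;
 *
 *		if (flags & QMC_RX_FLAG_HDLC_CRC)
 *			...	// CRC error, drop the frame
 *		...		// otherwise consume 'length' received bytes
 *	}
 *	...
 *	ret = qmc_chan_read_submit(priv->chan, priv->dma_addr, priv->size,
 *				   my_rx_complete, priv);
 */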

static void qmc_chan_read_done(struct qmc_chan *chan)
{
	void (*complete)(void *context, size_t size, unsigned int flags);
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	void *context;
	u16 datalen;
	u16 ctrl;

	/*
	 * E bit  UB bit
	 *   0      0   : The BD is free
	 *   1      1   : The BD is in use, waiting for transfer
	 *   0      1   : The BD is in use, waiting for completion
	 *   1      0   : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_RX_E)) {
		if (!(ctrl & QMC_BD_RX_UB))
			goto end;

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		complete = xfer_desc->rx_complete;
		context = xfer_desc->context;
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		datalen = qmc_read16(&bd->cbd_datlen);
		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);

		if (ctrl & QMC_BD_RX_W)
			chan->rxbd_done = chan->rxbds;
		else
			chan->rxbd_done++;

		chan->rx_pending--;

		if (complete) {
			spin_unlock_irqrestore(&chan->rx_lock, flags);

			/*
			 * Avoid converting between internal hardware flags and
			 * the software API flags.
			 * -> Be sure that the software API flags are consistent
			 *    with the hardware flags
			 */
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR);

			complete(context, datalen,
				 ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
					 QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
			spin_lock_irqsave(&chan->rx_lock, flags);
		}

		bd = chan->rxbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				     bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/*
	 * Use a common Tx/Rx 64 entries table.
	 * Tx and Rx parameters must be identical.
	 */
	if (chan->tx_ts_mask != chan->rx_ts_mask) {
		dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
		return -EINVAL;
	}

	val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on the Rx info */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on the Rx info */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
				 ~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}

static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				   bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/* Use a Rx 32 entries table */

	val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on the Rx info */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on the Rx info */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
				 ~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}

static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				   bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/* Use a Tx 32 entries table */

	val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on the Tx info */
	for (i = 0; i < info->nb_tx_ts; i++) {
		if (!(chan->tx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on the Tx info */
	for (i = 0; i < info->nb_tx_ts; i++) {
		if (!(chan->tx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
				 ~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}

static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Setup entries */
	if (chan->qmc->is_tsa_64rxtx)
		return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);

	return qmc_chan_setup_tsa_32tx(chan, &info, enable);
}

static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Setup entries */
	if (chan->qmc->is_tsa_64rxtx)
		return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);

	return qmc_chan_setup_tsa_32rx(chan, &info, enable);
}

static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
	return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}

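/*
 * Note on the CP command encoding above: the QMC channel number is placed
 * in the command channel field (chan->id << 2) and the QMC opcode is
 * combined with 0x0E, the value this driver uses to mark QMC channel
 * commands. For instance, STOP RECEIVE (opcode 0x0, see
 * qmc_chan_stop_rx() below) on channel 5 becomes cpm_command(5 << 2, 0x0E)
 * and STOP TRANSMIT (opcode 0x1) becomes cpm_command(5 << 2, 0x1E).
 */
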
static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	if (chan->is_rx_stopped) {
		/* The channel is already stopped -> simply return ok */
		ret = 0;
		goto end;
	}

	/* Send STOP RECEIVE command */
	ret = qmc_chan_command(chan, 0x0);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_rx_stopped = true;

	if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
		ret = qmc_chan_setup_tsa_rx(chan, false);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}

static int qmc_chan_stop_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	if (chan->is_tx_stopped) {
		/* The channel is already stopped -> simply return ok */
		ret = 0;
		goto end;
	}

	/* Send STOP TRANSMIT command */
	ret = qmc_chan_command(chan, 0x1);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_tx_stopped = true;

	if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
		ret = qmc_chan_setup_tsa_tx(chan, false);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}

static int qmc_chan_start_rx(struct qmc_chan *chan);

int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
	bool is_rx_rollback_needed = false;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&chan->ts_lock, flags);

	if (direction & QMC_CHAN_READ) {
		is_rx_rollback_needed = !chan->is_rx_stopped;
		ret = qmc_chan_stop_rx(chan);
		if (ret)
			goto end;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_stop_tx(chan);
		if (ret) {
			/* Restart rx if needed */
			if (is_rx_rollback_needed)
				qmc_chan_start_rx(chan);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->ts_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_stop);

static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
	struct tsa_serial_info info;
	u16 first_rx, last_tx;
	u16 trnsync;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Find the first Rx TS allocated to the channel */
	first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;

	/* Find the last Tx TS allocated to the channel */
	last_tx = fls64(chan->tx_ts_mask);

	trnsync = 0;
	if (info.nb_rx_ts)
		trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
	if (info.nb_tx_ts)
		trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);

	qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);

	dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
		chan->id, trnsync,
		first_rx, info.nb_rx_ts, chan->rx_ts_mask,
		last_tx, info.nb_tx_ts, chan->tx_ts_mask);

	return 0;
}

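/*
 * Worked example for the computation above (numbers chosen purely for
 * illustration): with rx_ts_mask = 0x000c, __ffs64() returns 2 so
 * first_rx = 3; with tx_ts_mask = 0x000c, fls64() returns 4 so
 * last_tx = 4. If the serial has nb_rx_ts = nb_tx_ts = 8, then
 * trnsync = QMC_SPE_TRNSYNC_RX((3 % 8) * 2) | QMC_SPE_TRNSYNC_TX((4 % 8) * 2)
 *         = (6 << 8) | 8 = 0x0608.
 */
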
static int qmc_chan_start_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	if (!chan->is_rx_stopped) {
		/* The channel is already started -> simply return ok */
		ret = 0;
		goto end;
	}

	ret = qmc_chan_setup_tsa_rx(chan, true);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	ret = qmc_setup_chan_trnsync(chan->qmc, chan);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	/* Restart the receiver */
	if (chan->mode == QMC_TRANSPARENT)
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
	else
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	chan->is_rx_halted = false;

	chan->is_rx_stopped = false;

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}

static int qmc_chan_start_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	if (!chan->is_tx_stopped) {
		/* The channel is already started -> simply return ok */
		ret = 0;
		goto end;
	}

	ret = qmc_chan_setup_tsa_tx(chan, true);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	ret = qmc_setup_chan_trnsync(chan->qmc, chan);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	/*
	 * Enable the channel transmitter as it could be disabled if
	 * qmc_chan_reset() was called.
	 */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	/* Set the POL bit in the channel mode register */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	chan->is_tx_stopped = false;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}

int qmc_chan_start(struct qmc_chan *chan, int direction)
{
	bool is_rx_rollback_needed = false;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&chan->ts_lock, flags);

	if (direction & QMC_CHAN_READ) {
		is_rx_rollback_needed = chan->is_rx_stopped;
		ret = qmc_chan_start_rx(chan);
		if (ret)
			goto end;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_start_tx(chan);
		if (ret) {
			/* Re-stop rx if needed */
			if (is_rx_rollback_needed)
				qmc_chan_stop_rx(chan);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->ts_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_start);

static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_RX_W));

	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;
	qmc_write16(chan->s_param + QMC_SPE_RBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_RBASE));

	chan->rx_pending = 0;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Disable transmitter. It will be re-enabled by qmc_chan_start() */
	qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	bd = chan->txbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_TX_W));

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	qmc_write16(chan->s_param + QMC_SPE_TBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_TBASE));

	/* Reset TSTATE and ZISTATE to their initial value */
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_reset(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_reset_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_reset_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_reset);

static int qmc_check_chans(struct qmc *qmc)
{
	struct tsa_serial_info info;
	struct qmc_chan *chan;
	u64 tx_ts_assigned_mask;
	u64 rx_ts_assigned_mask;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
		dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
		return -EINVAL;
	}

	/*
	 * If more than 32 TS are assigned to this serial, one common table is
	 * used for Tx and Rx and so masks must be equal for all channels.
	 */
	if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
		if (info.nb_tx_ts != info.nb_rx_ts) {
			dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
			return -EINVAL;
		}
	}

	tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
	rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
			return -EINVAL;
		}

		if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
			return -EINVAL;
		}
	}

	return 0;
}

static unsigned int qmc_nb_chans(struct qmc *qmc)
{
	unsigned int count = 0;
	struct qmc_chan *chan;

	list_for_each_entry(chan, &qmc->chan_head, list)
		count++;

	return count;
}

static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
{
	struct device_node *chan_np;
	struct qmc_chan *chan;
	const char *mode;
	u32 chan_id;
	u64 ts_mask;
	int ret;

	for_each_available_child_of_node(np, chan_np) {
		ret = of_property_read_u32(chan_np, "reg", &chan_id);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (chan_id > 63) {
			dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			of_node_put(chan_np);
			return -ENOMEM;
		}

		chan->id = chan_id;
		spin_lock_init(&chan->ts_lock);
		spin_lock_init(&chan->rx_lock);
		spin_lock_init(&chan->tx_lock);

		ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->tx_ts_mask_avail = ts_mask;
		chan->tx_ts_mask = chan->tx_ts_mask_avail;

		ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->rx_ts_mask_avail = ts_mask;
		chan->rx_ts_mask = chan->rx_ts_mask_avail;

		mode = "transparent";
		ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
		if (ret && ret != -EINVAL) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (!strcmp(mode, "transparent")) {
			chan->mode = QMC_TRANSPARENT;
		} else if (!strcmp(mode, "hdlc")) {
			chan->mode = QMC_HDLC;
		} else {
			dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
				chan_np, mode);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan->is_reverse_data = of_property_read_bool(chan_np,
							      "fsl,reverse-data");

		list_add_tail(&chan->list, &qmc->chan_head);
		qmc->chans[chan->id] = chan;
	}

	return qmc_check_chans(qmc);
}

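/*
 * Channel sub-nodes parsed above have the following shape (a sketch based
 * only on the properties read by qmc_of_parse_chans(); refer to the
 * fsl,cpm1-scc-qmc device tree binding for the normative description).
 * 'fsl,tx-ts-mask' and 'fsl,rx-ts-mask' are 64-bit values, hence the two
 * cells:
 *
 *	channel@16 {
 *		reg = <16>;
 *		fsl,operational-mode = "hdlc";
 *		fsl,tx-ts-mask = <0x00000000 0x000000f0>;
 *		fsl,rx-ts-mask = <0x00000000 0x000000f0>;
 *	};
 */
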
static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	unsigned int i;
	u16 val;

	/*
	 * Use a common Tx/Rx 64 entries table.
	 * Everything was previously checked: Tx and Rx parameters are
	 * identical -> use the Rx parameters to build the table.
	 */
	qmc->is_tsa_64rxtx = true;

	/* Invalidate all entries */
	for (i = 0; i < 64; i++)
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);

	/* Set Wrap bit on last entry */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init pointers to the table */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	unsigned int i;
	u16 val;

	/*
	 * Use a Tx 32 entries table and a Rx 32 entries table.
	 * Everything was previously checked.
	 */
	qmc->is_tsa_64rxtx = false;

	/* Invalidate all entries */
	for (i = 0; i < 32; i++) {
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
	}

	/* Set Wrap bit on last entries */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init Rx pointers ... */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);

	/* ... and Tx pointers */
	val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_init_tsa(struct qmc *qmc)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/*
	 * Initialize one common 64 entries table or two 32 entries tables
	 * (one for Tx and one for Rx) according to the number of assigned TS.
	 */
	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
		qmc_init_tsa_64rxtx(qmc, &info) :
		qmc_init_tsa_32rx_32tx(qmc, &info);
}

static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
{
	unsigned int i;
	cbd_t __iomem *bd;
	int ret;
	u16 val;

	chan->qmc = qmc;

	/* Set channel specific parameter base address */
	chan->s_param = qmc->dpram + (chan->id * 64);
	/* 16 bd per channel (8 rx and 8 tx) */
	chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
	chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;

	/* TBASE and TBPTR */
	val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);

	/* RBASE and RBPTR */
	val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
	if (chan->mode == QMC_TRANSPARENT) {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
		val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
		if (chan->is_reverse_data)
			val |= QMC_SPE_CHAMR_TRANSP_RD;
		qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
		ret = qmc_setup_chan_trnsync(qmc, chan);
		if (ret)
			return ret;
	} else {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
		qmc_write16(chan->s_param + QMC_SPE_CHAMR,
			    QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
	}

	/* Do not enable interrupts now. They will be enabled later */
	qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);

	/* Init Rx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_RXBDS == 0);
	val = QMC_BD_RX_I;
	for (i = 0; i < QMC_NB_RXBDS; i++) {
		bd = chan->rxbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->rxbds + QMC_NB_RXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);

	/* Init Tx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_TXBDS == 0);
	val = QMC_BD_TX_I;
	if (chan->mode == QMC_HDLC)
		val |= QMC_BD_TX_L | QMC_BD_TX_TC;
	for (i = 0; i < QMC_NB_TXBDS; i++) {
		bd = chan->txbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->txbds + QMC_NB_TXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);

	return 0;
}

static int qmc_setup_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		ret = qmc_setup_chan(qmc, chan);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_finalize_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Unmask channel interrupts */
		if (chan->mode == QMC_HDLC) {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
				    QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		} else {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_UN | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		}

		/* Force the channel to stop */
		ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_setup_ints(struct qmc *qmc)
{
	unsigned int i;
	u16 __iomem *last;

	/* Zero out all entries */
	for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
		qmc_write16(qmc->int_table + i, 0x0000);

	/* Set Wrap bit on last entry */
	if (qmc->int_size >= sizeof(u16)) {
		last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
		qmc_write16(last, QMC_INT_W);
	}

	return 0;
}

static void qmc_irq_gint(struct qmc *qmc)
{
	struct qmc_chan *chan;
	unsigned int chan_id;
	unsigned long flags;
	u16 int_entry;

	int_entry = qmc_read16(qmc->int_curr);
	while (int_entry & QMC_INT_V) {
		/* Clear all but the Wrap bit */
		qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);

		chan_id = QMC_INT_GET_CHANNEL(int_entry);
		chan = qmc->chans[chan_id];
		if (!chan) {
			dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
			goto int_next;
		}

		if (int_entry & QMC_INT_TXB)
			qmc_chan_write_done(chan);

		if (int_entry & QMC_INT_UN) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
				 int_entry);
			chan->nb_tx_underrun++;
		}

		if (int_entry & QMC_INT_BSY) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
				 int_entry);
			chan->nb_rx_busy++;
			/* Restart the receiver if needed */
			spin_lock_irqsave(&chan->rx_lock, flags);
			if (chan->rx_pending && !chan->is_rx_stopped) {
				if (chan->mode == QMC_TRANSPARENT)
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
				else
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
				qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
				chan->is_rx_halted = false;
			} else {
				chan->is_rx_halted = true;
			}
			spin_unlock_irqrestore(&chan->rx_lock, flags);
		}

		if (int_entry & QMC_INT_RXB)
			qmc_chan_read_done(chan);

int_next:
		if (int_entry & QMC_INT_W)
			qmc->int_curr = qmc->int_table;
		else
			qmc->int_curr++;
		int_entry = qmc_read16(qmc->int_curr);
	}
}

static irqreturn_t qmc_irq_handler(int irq, void *priv)
{
	struct qmc *qmc = (struct qmc *)priv;
	u16 scce;

	scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
	qmc_write16(qmc->scc_regs + SCC_SCCE, scce);

	if (unlikely(scce & SCC_SCCE_IQOV))
		dev_info(qmc->dev, "IRQ queue overflow\n");

	if (unlikely(scce & SCC_SCCE_GUN))
		dev_err(qmc->dev, "Global transmitter underrun\n");

	if (unlikely(scce & SCC_SCCE_GOV))
		dev_err(qmc->dev, "Global receiver overrun\n");

	/* normal interrupt */
	if (likely(scce & SCC_SCCE_GINT))
		qmc_irq_gint(qmc);

	return IRQ_HANDLED;
}

static int qmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	unsigned int nb_chans;
	struct resource *res;
	struct qmc *qmc;
	int irq;
	int ret;

	qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
	if (!qmc)
		return -ENOMEM;

	qmc->dev = &pdev->dev;
	INIT_LIST_HEAD(&qmc->chan_head);

	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
	if (IS_ERR(qmc->scc_regs))
		return PTR_ERR(qmc->scc_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
	if (!res)
		return -EINVAL;
	qmc->scc_pram_offset = res->start - get_immrbase();
	qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
	if (IS_ERR(qmc->scc_pram))
		return PTR_ERR(qmc->scc_pram);

	qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
	if (IS_ERR(qmc->dpram))
		return PTR_ERR(qmc->dpram);

	qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
	if (IS_ERR(qmc->tsa_serial)) {
		return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
				     "Failed to get TSA serial\n");
	}

	/* Connect the serial (SCC) to TSA */
	ret = tsa_serial_connect(qmc->tsa_serial);
	if (ret) {
		dev_err(qmc->dev, "Failed to connect TSA serial\n");
		return ret;
	}

	/* Parse channels information */
	ret = qmc_of_parse_chans(qmc, np);
	if (ret)
		goto err_tsa_serial_disconnect;

	nb_chans = qmc_nb_chans(qmc);

	/* Init GSMRH and GSMRL registers */
	qmc_write32(qmc->scc_regs + SCC_GSMRH,
		    SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);

	/* Enable QMC mode */
	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);

	/*
	 * Allocate the buffer descriptor table
	 * 8 rx and 8 tx descriptors per channel
	 */
	qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
	qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
					    &qmc->bd_dma_addr, GFP_KERNEL);
	if (!qmc->bd_table) {
		dev_err(qmc->dev, "Failed to allocate bd table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->bd_table, 0, qmc->bd_size);

	qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);

	/* Allocate the interrupt table */
	qmc->int_size = QMC_NB_INTS * sizeof(u16);
	qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
					     &qmc->int_dma_addr, GFP_KERNEL);
	if (!qmc->int_table) {
		dev_err(qmc->dev, "Failed to allocate interrupt table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->int_table, 0, qmc->int_size);

	qmc->int_curr = qmc->int_table;
	qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
	qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);

	/* Set MRBLR (valid for HDLC only) max MRU + max CRC */
	qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);

	qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
	qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);

	qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
	qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);

	ret = qmc_init_tsa(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);

	ret = qmc_setup_chans(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Init interrupts table */
	ret = qmc_setup_ints(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Disable and clear interrupts, set the irq handler */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_tsa_serial_disconnect;
	}
	ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
	if (ret < 0)
		goto err_tsa_serial_disconnect;

	/* Enable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM,
		    SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);

	ret = qmc_finalize_chans(qmc);
	if (ret < 0)
		goto err_disable_intr;

	/* Enable transmitter and receiver */
	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	platform_set_drvdata(pdev, qmc);

	/* Populate channel related devices */
	ret = devm_of_platform_populate(qmc->dev);
	if (ret)
		goto err_disable_txrx;

	return 0;

err_disable_txrx:
	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);

err_disable_intr:
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

err_tsa_serial_disconnect:
	tsa_serial_disconnect(qmc->tsa_serial);
	return ret;
}

static void qmc_remove(struct platform_device *pdev)
{
	struct qmc *qmc = platform_get_drvdata(pdev);

	/* Disable transmitter and receiver */
	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);

	/* Disable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

	/* Disconnect the serial from TSA */
	tsa_serial_disconnect(qmc->tsa_serial);
}

static const struct of_device_id qmc_id_table[] = {
	{ .compatible = "fsl,cpm1-scc-qmc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);

static struct platform_driver qmc_driver = {
	.driver = {
		.name = "fsl-qmc",
		.of_match_table = of_match_ptr(qmc_id_table),
	},
	.probe = qmc_probe,
	.remove_new = qmc_remove,
};
module_platform_driver(qmc_driver);

static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
{
	struct platform_device *pdev;
	struct qmc_chan *qmc_chan;
	struct qmc *qmc;

	if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
		return ERR_PTR(-EINVAL);

	pdev = of_find_device_by_node(qmc_np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	qmc = platform_get_drvdata(pdev);
	if (!qmc) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (chan_index >= ARRAY_SIZE(qmc->chans)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc->chans[chan_index];
	if (!qmc_chan) {
		platform_device_put(pdev);
		return ERR_PTR(-ENOENT);
	}

	return qmc_chan;
}

struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct qmc_chan *qmc_chan;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
					       &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (out_args.args_count != 1) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
	of_node_put(out_args.np);
	return qmc_chan;
}
EXPORT_SYMBOL(qmc_chan_get_byphandle);

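/*
 * Example consumer lookup (a sketch; the property name is the caller's
 * choice): with a client device tree node containing
 *
 *	fsl,qmc-chan = <&qmc 16>;
 *
 * the channel is obtained with
 *
 *	chan = qmc_chan_get_byphandle(np, "fsl,qmc-chan");
 *
 * The single phandle argument is the channel index, matching the one-cell
 * fixed argument count parsed above.
 */
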
struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
{
	struct device_node *qmc_np;
	u32 chan_index;
	int ret;

	qmc_np = np->parent;
	ret = of_property_read_u32(np, "reg", &chan_index);
	if (ret)
		return ERR_PTR(-EINVAL);

	return qmc_chan_get_from_qmc(qmc_np, chan_index);
}
EXPORT_SYMBOL(qmc_chan_get_bychild);

void qmc_chan_put(struct qmc_chan *chan)
{
	put_device(chan->qmc->dev);
}
EXPORT_SYMBOL(qmc_chan_put);

static void devm_qmc_chan_release(struct device *dev, void *res)
{
	struct qmc_chan **qmc_chan = res;

	qmc_chan_put(*qmc_chan);
}

struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
					     struct device_node *np,
					     const char *phandle_name)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);

struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
					   struct device_node *np)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_bychild(np);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_bychild);

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");