/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
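/*
 * Example (hypothetical register value): a GTYPE read of 0x00232c0f would
 * decode to AT_XDMAC_NB_CH() = 16 channels, AT_XDMAC_FIFO_SZ() = 352 bytes
 * of FIFO and AT_XDMAC_NB_REQ() = 36 peripheral requests.
 */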
#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
#define AT_XDMAC_GWS 0x2C /* Global Channel Write Suspend Register */
#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
#define AT_XDMAC_GSWS 0x3C /* Global Channel Software Request Status Register */
#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
#define AT_XDMAC_CC_DWIDTH_OFFSET 11
#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
#define AT_XDMAC_CC_DWIDTH_WORD 0x2
#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier; mask before shifting, otherwise the macro always evaluates to 0 */
#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */

#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES 5

#define AT_XDMAC_DMA_BUSWIDTHS \
    (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
     BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
    AT_XDMAC_CHAN_IS_CYCLIC = 0,
    AT_XDMAC_CHAN_IS_PAUSED,
};

/* ----- Channels ----- */
struct at_xdmac_chan {
    struct dma_chan chan;
    void __iomem *ch_regs;
    u32 mask; /* Channel Mask */
    u32 cfg; /* Channel Configuration Register */
    u8 perid; /* Peripheral ID */
    u8 perif; /* Peripheral Interface */
    u8 memif; /* Memory Interface */
    u32 save_cc;
    u32 save_cim;
    u32 save_cnda;
    u32 save_cndc;
    u32 irq_status;
    unsigned long status;
    struct tasklet_struct tasklet;
    struct dma_slave_config sconfig;

    spinlock_t lock;

    struct list_head xfers_list;
    struct list_head free_descs_list;
};


/* ----- Controller ----- */
struct at_xdmac {
    struct dma_device dma;
    void __iomem *regs;
    int irq;
    struct clk *clk;
    u32 save_gim;
    struct dma_pool *at_xdmac_desc_pool;
    struct at_xdmac_chan chan[0];
};


/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
    dma_addr_t mbr_nda; /* Next Descriptor Member */
    u32 mbr_ubc; /* Microblock Control Member */
    dma_addr_t mbr_sa; /* Source Address Member */
    dma_addr_t mbr_da; /* Destination Address Member */
    u32 mbr_cfg; /* Configuration Register */
    u32 mbr_bc; /* Block Control Register */
    u32 mbr_ds; /* Data Stride Register */
    u32 mbr_sus; /* Source Microblock Stride Register */
    u32 mbr_dus; /* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
    struct at_xdmac_lld lld;
    enum dma_transfer_direction direction;
    struct dma_async_tx_descriptor tx_dma_desc;
    struct list_head desc_node;
    /* Following members are only used by the first descriptor */
    bool active_xfer;
    unsigned int xfer_size;
    struct list_head descs_list;
    struct list_head xfer_node;
} __aligned(sizeof(u64));

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
    return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
}
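
/*
 * For instance, with AT_XDMAC_CHAN_REG_BASE = 0x50 and a 0x40 stride,
 * channel 0 registers start at offset 0x50 and channel 2 registers at
 * 0x50 + 2 * 0x40 = 0xd0 from the controller base.
 */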

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
    writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
    return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
    return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
    return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
    return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
    return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
    return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline int at_xdmac_csize(u32 maxburst)
{
    int csize;

    csize = ffs(maxburst) - 1;
    if (csize > 4)
        csize = -EINVAL;

    return csize;
};
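
/*
 * Worked example: a maxburst of 8 gives csize = ffs(8) - 1 = 3 (a chunk of
 * 8 data), while a maxburst of 32 gives 5 > 4 and is rejected with -EINVAL,
 * since the controller tops out at 16-data chunks.
 */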

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
    return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
};
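
/*
 * Example: a cfg value carrying AT_XDMAC_CC_DWIDTH_WORD (0x2) in bits 12:11
 * makes at_xdmac_get_dwidth() return 2, i.e. a 1 << 2 = 4 byte access width.
 */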

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");


static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
    struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);

    /* AT_XDMAC_GS is a global register, so read it from the controller base. */
    return at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
    at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

    /* Wait until all channels are disabled. */
    while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
        cpu_relax();

    at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}

/* Call with lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
                                struct at_xdmac_desc *first)
{
    struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
    u32 reg;

    dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

    if (at_xdmac_chan_is_enabled(atchan))
        return;

    /* Set transfer as active to not try to start it again. */
    first->active_xfer = true;

    /* Tell xdmac where to get the first descriptor. */
    reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
          | AT_XDMAC_CNDA_NDAIF(atchan->memif);
    at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

    /*
     * When doing a non-cyclic transfer, we need to use the next
     * descriptor view 2 (or higher) since some fields of the
     * configuration register depend on the transfer size and the
     * src/dest addresses.
     */
    if (at_xdmac_chan_is_cyclic(atchan))
        reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
    else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
        reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
    else
        reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
    /*
     * Even though the register is updated from the descriptor
     * configuration when using view 2 or higher, the PROT bit won't be
     * set properly. That bit can only be modified through the channel
     * configuration register.
     */
    at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

    reg |= AT_XDMAC_CNDC_NDDUP
           | AT_XDMAC_CNDC_NDSUP
           | AT_XDMAC_CNDC_NDE;
    at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

    dev_vdbg(chan2dev(&atchan->chan),
             "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
             __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
             at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
             at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
             at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
             at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
             at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

    at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
    reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
    /*
     * There is no end of list when doing cyclic DMA, so we need to get
     * an interrupt after each period.
     */
    if (at_xdmac_chan_is_cyclic(atchan))
        at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
                            reg | AT_XDMAC_CIE_BIE);
    else
        at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
                            reg | AT_XDMAC_CIE_LIE);
    at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
    dev_vdbg(chan2dev(&atchan->chan),
             "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
    wmb();
    at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

    dev_vdbg(chan2dev(&atchan->chan),
             "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
             __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
             at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
             at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
             at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
             at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
             at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
    struct at_xdmac_desc *desc = txd_to_at_desc(tx);
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
    dma_cookie_t cookie;
    unsigned long irqflags;

    spin_lock_irqsave(&atchan->lock, irqflags);
    cookie = dma_cookie_assign(tx);

    dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
             __func__, atchan, desc);
    list_add_tail(&desc->xfer_node, &atchan->xfers_list);
    if (list_is_singular(&atchan->xfers_list))
        at_xdmac_start_xfer(atchan, desc);

    spin_unlock_irqrestore(&atchan->lock, irqflags);
    return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
                                                 gfp_t gfp_flags)
{
    struct at_xdmac_desc *desc;
    struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
    dma_addr_t phys;

    desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
    if (desc) {
        INIT_LIST_HEAD(&desc->descs_list);
        dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
        desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
        desc->tx_dma_desc.phys = phys;
    }

    return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
    memset(&desc->lld, 0, sizeof(desc->lld));
    INIT_LIST_HEAD(&desc->descs_list);
    desc->direction = DMA_TRANS_NONE;
    desc->xfer_size = 0;
    desc->active_xfer = false;
}

/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
    struct at_xdmac_desc *desc;

    if (list_empty(&atchan->free_descs_list)) {
        desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
    } else {
        desc = list_first_entry(&atchan->free_descs_list,
                                struct at_xdmac_desc, desc_node);
        list_del(&desc->desc_node);
        at_xdmac_init_used_desc(desc);
    }

    return desc;
}

static void at_xdmac_queue_desc(struct dma_chan *chan,
                                struct at_xdmac_desc *prev,
                                struct at_xdmac_desc *desc)
{
    if (!prev || !desc)
        return;

    prev->lld.mbr_nda = desc->tx_dma_desc.phys;
    prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

    dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
            __func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
                                                  struct at_xdmac_desc *desc)
{
    if (!desc)
        return;

    desc->lld.mbr_bc++;

    dev_dbg(chan2dev(chan),
            "%s: incrementing the block count of the desc 0x%p\n",
            __func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
                                       struct of_dma *of_dma)
{
    struct at_xdmac *atxdmac = of_dma->of_dma_data;
    struct at_xdmac_chan *atchan;
    struct dma_chan *chan;
    struct device *dev = atxdmac->dma.dev;

    if (dma_spec->args_count != 1) {
        dev_err(dev, "dma phandle args: bad number of args\n");
        return NULL;
    }

    chan = dma_get_any_slave_channel(&atxdmac->dma);
    if (!chan) {
        dev_err(dev, "can't get a dma channel\n");
        return NULL;
    }

    atchan = to_at_xdmac_chan(chan);
    atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
    atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
    atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
    dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
            atchan->memif, atchan->perif, atchan->perid);

    return chan;
}
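
/*
 * Illustrative sketch of a consumer binding for the xlate above (node names
 * are hypothetical): memif, perif and perid are all encoded in the single
 * device-tree cell, e.g.
 *
 *     dmas = <&xdmac0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
 *                      | AT91_XDMAC_DT_PERID(11))>;
 *     dma-names = "tx";
 */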

static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
                                      enum dma_transfer_direction direction)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
    int csize, dwidth;

    if (direction == DMA_DEV_TO_MEM) {
        atchan->cfg =
            AT91_XDMAC_DT_PERID(atchan->perid)
            | AT_XDMAC_CC_DAM_INCREMENTED_AM
            | AT_XDMAC_CC_SAM_FIXED_AM
            | AT_XDMAC_CC_DIF(atchan->memif)
            | AT_XDMAC_CC_SIF(atchan->perif)
            | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
            | AT_XDMAC_CC_DSYNC_PER2MEM
            | AT_XDMAC_CC_MBSIZE_SIXTEEN
            | AT_XDMAC_CC_TYPE_PER_TRAN;
        csize = ffs(atchan->sconfig.src_maxburst) - 1;
        if (csize < 0) {
            dev_err(chan2dev(chan), "invalid src maxburst value\n");
            return -EINVAL;
        }
        atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
        dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
        if (dwidth < 0) {
            dev_err(chan2dev(chan), "invalid src addr width value\n");
            return -EINVAL;
        }
        atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
    } else if (direction == DMA_MEM_TO_DEV) {
        atchan->cfg =
            AT91_XDMAC_DT_PERID(atchan->perid)
            | AT_XDMAC_CC_DAM_FIXED_AM
            | AT_XDMAC_CC_SAM_INCREMENTED_AM
            | AT_XDMAC_CC_DIF(atchan->perif)
            | AT_XDMAC_CC_SIF(atchan->memif)
            | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
            | AT_XDMAC_CC_DSYNC_MEM2PER
            | AT_XDMAC_CC_MBSIZE_SIXTEEN
            | AT_XDMAC_CC_TYPE_PER_TRAN;
        csize = ffs(atchan->sconfig.dst_maxburst) - 1;
        if (csize < 0) {
            dev_err(chan2dev(chan), "invalid dst maxburst value\n");
            return -EINVAL;
        }
        atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
        dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
        if (dwidth < 0) {
            dev_err(chan2dev(chan), "invalid dst addr width value\n");
            return -EINVAL;
        }
        atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
    }

    dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

    return 0;
}

/*
 * Only check that the maxburst and addr width values are supported by the
 * controller, but not that the configuration is valid for the transfer,
 * since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
    if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
        || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
        return -EINVAL;

    if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
        || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
        return -EINVAL;

    return 0;
}
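
/*
 * Example: src_maxburst = 16 with a 4-byte src_addr_width passes this check,
 * whereas src_maxburst = 32 or a 16-byte width is rejected, matching
 * AT_XDMAC_MAX_CSIZE (16 data) and AT_XDMAC_MAX_DWIDTH (8 bytes).
 */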

static int at_xdmac_set_slave_config(struct dma_chan *chan,
                                     struct dma_slave_config *sconfig)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

    if (at_xdmac_check_slave_config(sconfig)) {
        dev_err(chan2dev(chan), "invalid slave configuration\n");
        return -EINVAL;
    }

    memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

    return 0;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, enum dma_transfer_direction direction,
                       unsigned long flags, void *context)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
    struct at_xdmac_desc *first = NULL, *prev = NULL;
    struct scatterlist *sg;
    int i;
    unsigned int xfer_size = 0;
    unsigned long irqflags;
    struct dma_async_tx_descriptor *ret = NULL;

    if (!sgl)
        return NULL;

    if (!is_slave_direction(direction)) {
        dev_err(chan2dev(chan), "invalid DMA direction\n");
        return NULL;
    }

    dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
            __func__, sg_len,
            direction == DMA_MEM_TO_DEV ? "to device" : "from device",
            flags);

    /* Protect dma_sconfig field that can be modified by set_slave_config. */
    spin_lock_irqsave(&atchan->lock, irqflags);

    if (at_xdmac_compute_chan_conf(chan, direction))
        goto spin_unlock;

    /* Prepare descriptors. */
    for_each_sg(sgl, sg, sg_len, i) {
        struct at_xdmac_desc *desc = NULL;
        u32 len, mem, dwidth, fixed_dwidth;

        len = sg_dma_len(sg);
        mem = sg_dma_address(sg);
        if (unlikely(!len)) {
            dev_err(chan2dev(chan), "sg data length is zero\n");
            goto spin_unlock;
        }
        dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
                __func__, i, len, mem);

        desc = at_xdmac_get_desc(atchan);
        if (!desc) {
            dev_err(chan2dev(chan), "can't get descriptor\n");
            if (first)
                list_splice_init(&first->descs_list, &atchan->free_descs_list);
            goto spin_unlock;
        }

        /* Linked list descriptor setup. */
        if (direction == DMA_DEV_TO_MEM) {
            desc->lld.mbr_sa = atchan->sconfig.src_addr;
            desc->lld.mbr_da = mem;
        } else {
            desc->lld.mbr_sa = mem;
            desc->lld.mbr_da = atchan->sconfig.dst_addr;
        }
        dwidth = at_xdmac_get_dwidth(atchan->cfg);
        fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
                       ? dwidth
                       : AT_XDMAC_CC_DWIDTH_BYTE;
        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
                            | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
                            | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
                            | (len >> fixed_dwidth); /* microblock length */
        desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
                            AT_XDMAC_CC_DWIDTH(fixed_dwidth);
        dev_dbg(chan2dev(chan),
                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

        /* Chain lld. */
        if (prev)
            at_xdmac_queue_desc(chan, prev, desc);

        prev = desc;
        if (!first)
            first = desc;

        dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                __func__, desc, first);
        list_add_tail(&desc->desc_node, &first->descs_list);
        xfer_size += len;
    }

    first->tx_dma_desc.flags = flags;
    first->xfer_size = xfer_size;
    first->direction = direction;
    ret = &first->tx_dma_desc;

spin_unlock:
    spin_unlock_irqrestore(&atchan->lock, irqflags);
    return ret;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                         size_t buf_len, size_t period_len,
                         enum dma_transfer_direction direction,
                         unsigned long flags)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
    struct at_xdmac_desc *first = NULL, *prev = NULL;
    unsigned int periods = buf_len / period_len;
    int i;
    unsigned long irqflags;

    dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
            __func__, &buf_addr, buf_len, period_len,
            direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

    if (!is_slave_direction(direction)) {
        dev_err(chan2dev(chan), "invalid DMA direction\n");
        return NULL;
    }

    if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
        dev_err(chan2dev(chan), "channel currently used\n");
        return NULL;
    }

    if (at_xdmac_compute_chan_conf(chan, direction))
        return NULL;

    for (i = 0; i < periods; i++) {
        struct at_xdmac_desc *desc = NULL;

        spin_lock_irqsave(&atchan->lock, irqflags);
        desc = at_xdmac_get_desc(atchan);
        if (!desc) {
            dev_err(chan2dev(chan), "can't get descriptor\n");
            if (first)
                list_splice_init(&first->descs_list, &atchan->free_descs_list);
            spin_unlock_irqrestore(&atchan->lock, irqflags);
            return NULL;
        }
        spin_unlock_irqrestore(&atchan->lock, irqflags);
        dev_dbg(chan2dev(chan),
                "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
                __func__, desc, &desc->tx_dma_desc.phys);

        if (direction == DMA_DEV_TO_MEM) {
            desc->lld.mbr_sa = atchan->sconfig.src_addr;
            desc->lld.mbr_da = buf_addr + i * period_len;
        } else {
            desc->lld.mbr_sa = buf_addr + i * period_len;
            desc->lld.mbr_da = atchan->sconfig.dst_addr;
        }
        desc->lld.mbr_cfg = atchan->cfg;
        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
                            | AT_XDMAC_MBR_UBC_NDEN
                            | AT_XDMAC_MBR_UBC_NSEN
                            | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

        dev_dbg(chan2dev(chan),
                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

        /* Chain lld. */
        if (prev)
            at_xdmac_queue_desc(chan, prev, desc);

        prev = desc;
        if (!first)
            first = desc;

        dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                __func__, desc, first);
        list_add_tail(&desc->desc_node, &first->descs_list);
    }

    at_xdmac_queue_desc(chan, prev, first);
    first->tx_dma_desc.flags = flags;
    first->xfer_size = buf_len;
    first->direction = direction;

    return &first->tx_dma_desc;
}
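
/*
 * For example, a 4 KiB ring with period_len = 1 KiB builds four linked
 * descriptors; the trailing at_xdmac_queue_desc(chan, prev, first) call
 * above chains the last descriptor back to the first, so the transfer
 * loops forever until the channel is terminated.
 */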

static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
    u32 width;

    /*
     * Check the address alignment to select the greatest data width we
     * can use.
     *
     * Some XDMAC implementations don't provide dword transfers; in
     * that case selecting dword has the same behavior as selecting
     * word transfers.
     */
    if (!(addr & 7)) {
        width = AT_XDMAC_CC_DWIDTH_DWORD;
        dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
    } else if (!(addr & 3)) {
        width = AT_XDMAC_CC_DWIDTH_WORD;
        dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
    } else if (!(addr & 1)) {
        width = AT_XDMAC_CC_DWIDTH_HALFWORD;
        dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
    } else {
        width = AT_XDMAC_CC_DWIDTH_BYTE;
        dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
    }

    return width;
}
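
/*
 * Example: at_xdmac_align_width() on 0x1008 returns DWIDTH_DWORD (8-byte
 * aligned), on 0x1006 DWIDTH_HALFWORD, and on 0x1003 DWIDTH_BYTE.
 */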

static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
                                struct at_xdmac_chan *atchan,
                                struct at_xdmac_desc *prev,
                                dma_addr_t src, dma_addr_t dst,
                                struct dma_interleaved_template *xt,
                                struct data_chunk *chunk)
{
    struct at_xdmac_desc *desc;
    u32 dwidth;
    unsigned long flags;
    size_t ublen;
    /*
     * WARNING: The channel configuration is set here since there is no
     * dmaengine_slave_config call in this case. Moreover, we don't know
     * the direction, which means we can't dynamically set the source and
     * dest interfaces, so we have to use the same one for both. Only
     * interface 0 allows EBI access. Fortunately, we can access DDR
     * through both ports (at least on SAMA5D4x), so using the same
     * interface for source and dest works around not knowing the
     * direction.
     * ERRATA: Even though it is useless for memory transfers, the PERID
     * must not match that of another channel. Otherwise, it could lead to
     * spurious flag status.
     */
    u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
                  | AT_XDMAC_CC_DIF(0)
                  | AT_XDMAC_CC_SIF(0)
                  | AT_XDMAC_CC_MBSIZE_SIXTEEN
                  | AT_XDMAC_CC_TYPE_MEM_TRAN;

    dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
    if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
        dev_dbg(chan2dev(chan),
                "%s: chunk too big (%zu, max size %lu)...\n",
                __func__, chunk->size,
                AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
        return NULL;
    }

    if (prev)
        dev_dbg(chan2dev(chan),
                "Adding items at the end of desc 0x%p\n", prev);

    if (xt->src_inc) {
        if (xt->src_sgl)
            chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
        else
            chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
    }

    if (xt->dst_inc) {
        if (xt->dst_sgl)
            chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
        else
            chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
    }

    spin_lock_irqsave(&atchan->lock, flags);
    desc = at_xdmac_get_desc(atchan);
    spin_unlock_irqrestore(&atchan->lock, flags);
    if (!desc) {
        dev_err(chan2dev(chan), "can't get descriptor\n");
        return NULL;
    }

    chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

    ublen = chunk->size >> dwidth;

    desc->lld.mbr_sa = src;
    desc->lld.mbr_da = dst;
    desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
    desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

    desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
                        | AT_XDMAC_MBR_UBC_NDEN
                        | AT_XDMAC_MBR_UBC_NSEN
                        | ublen;
    desc->lld.mbr_cfg = chan_cc;

    dev_dbg(chan2dev(chan),
            "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
            __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
            desc->lld.mbr_ubc, desc->lld.mbr_cfg);

    /* Chain lld. */
    if (prev)
        at_xdmac_queue_desc(chan, prev, desc);

    return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
                          struct dma_interleaved_template *xt,
                          unsigned long flags)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
    struct at_xdmac_desc *prev = NULL, *first = NULL;
    dma_addr_t dst_addr, src_addr;
    size_t src_skip = 0, dst_skip = 0, len = 0;
    struct data_chunk *chunk;
    int i;

    if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
        return NULL;

    /*
     * TODO: Handle the case where we have to repeat a chain of
     * descriptors...
     */
    if ((xt->numf > 1) && (xt->frame_size > 1))
        return NULL;

    dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
            __func__, &xt->src_start, &xt->dst_start, xt->numf,
            xt->frame_size, flags);

    src_addr = xt->src_start;
    dst_addr = xt->dst_start;

    if (xt->numf > 1) {
        first = at_xdmac_interleaved_queue_desc(chan, atchan,
                                                NULL,
                                                src_addr, dst_addr,
                                                xt, xt->sgl);
        if (!first)
            return NULL;

        /* Length of the block is (BLEN+1) microblocks. */
        for (i = 0; i < xt->numf - 1; i++)
            at_xdmac_increment_block_count(chan, first);

        dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                __func__, first, first);
        list_add_tail(&first->desc_node, &first->descs_list);
    } else {
        for (i = 0; i < xt->frame_size; i++) {
            size_t src_icg = 0, dst_icg = 0;
            struct at_xdmac_desc *desc;

            chunk = xt->sgl + i;

            dst_icg = dmaengine_get_dst_icg(xt, chunk);
            src_icg = dmaengine_get_src_icg(xt, chunk);

            src_skip = chunk->size + src_icg;
            dst_skip = chunk->size + dst_icg;

            dev_dbg(chan2dev(chan),
                    "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
                    __func__, chunk->size, src_icg, dst_icg);

            desc = at_xdmac_interleaved_queue_desc(chan, atchan,
                                                   prev,
                                                   src_addr, dst_addr,
                                                   xt, chunk);
            if (!desc) {
                /* first may still be NULL on the first iteration. */
                if (first)
                    list_splice_init(&first->descs_list,
                                     &atchan->free_descs_list);
                return NULL;
            }

            if (!first)
                first = desc;

            dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                    __func__, desc, first);
            list_add_tail(&desc->desc_node, &first->descs_list);

            if (xt->src_sgl)
                src_addr += src_skip;

            if (xt->dst_sgl)
                dst_addr += dst_skip;

            len += chunk->size;
            prev = desc;
        }
    }

    first->tx_dma_desc.cookie = -EBUSY;
    first->tx_dma_desc.flags = flags;
    first->xfer_size = len;

    return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                         size_t len, unsigned long flags)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
    struct at_xdmac_desc *first = NULL, *prev = NULL;
    size_t remaining_size = len, xfer_size = 0, ublen;
    dma_addr_t src_addr = src, dst_addr = dest;
    u32 dwidth;
    /*
     * WARNING: We don't know the direction, which means we can't
     * dynamically set the source and dest interfaces, so we have to use
     * the same one for both. Only interface 0 allows EBI access.
     * Fortunately, we can access DDR through both ports (at least on
     * SAMA5D4x), so using the same interface for source and dest works
     * around not knowing the direction.
     * ERRATA: Even though it is useless for memory transfers, the PERID
     * must not match that of another channel. Otherwise, it could lead to
     * spurious flag status.
     */
    u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
                  | AT_XDMAC_CC_DAM_INCREMENTED_AM
                  | AT_XDMAC_CC_SAM_INCREMENTED_AM
                  | AT_XDMAC_CC_DIF(0)
                  | AT_XDMAC_CC_SIF(0)
                  | AT_XDMAC_CC_MBSIZE_SIXTEEN
                  | AT_XDMAC_CC_TYPE_MEM_TRAN;
    unsigned long irqflags;

    dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
            __func__, &src, &dest, len, flags);

    if (unlikely(!len))
        return NULL;

    dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

    /* Prepare descriptors. */
    while (remaining_size) {
        struct at_xdmac_desc *desc = NULL;

        dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

        spin_lock_irqsave(&atchan->lock, irqflags);
        desc = at_xdmac_get_desc(atchan);
        spin_unlock_irqrestore(&atchan->lock, irqflags);
        if (!desc) {
            dev_err(chan2dev(chan), "can't get descriptor\n");
            if (first)
                list_splice_init(&first->descs_list, &atchan->free_descs_list);
            return NULL;
        }

        /* Update src and dest addresses. */
        src_addr += xfer_size;
        dst_addr += xfer_size;

        if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
            xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
        else
            xfer_size = remaining_size;

        dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

        /* Check remaining length and change data width if needed. */
        dwidth = at_xdmac_align_width(chan,
                                      src_addr | dst_addr | xfer_size);
        chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
        chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

        ublen = xfer_size >> dwidth;
        remaining_size -= xfer_size;

        desc->lld.mbr_sa = src_addr;
        desc->lld.mbr_da = dst_addr;
        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
                            | AT_XDMAC_MBR_UBC_NDEN
                            | AT_XDMAC_MBR_UBC_NSEN
                            | ublen;
        desc->lld.mbr_cfg = chan_cc;

        dev_dbg(chan2dev(chan),
                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
                __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

        /* Chain lld. */
        if (prev)
            at_xdmac_queue_desc(chan, prev, desc);

        prev = desc;
        if (!first)
            first = desc;

        dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                __func__, desc, first);
        list_add_tail(&desc->desc_node, &first->descs_list);
    }

    first->tx_dma_desc.flags = flags;
    first->xfer_size = len;

    return &first->tx_dma_desc;
}
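
/*
 * Chunking example: with double-word accesses (dwidth = 3), a single
 * microblock covers at most AT_XDMAC_MBR_UBC_UBLEN_MAX << 3 bytes, i.e.
 * just under 128 MiB, so a 256 MiB copy is split into two full descriptors
 * plus a short remainder descriptor.
 */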

static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
                                                         struct at_xdmac_chan *atchan,
                                                         dma_addr_t dst_addr,
                                                         size_t len,
                                                         int value)
{
    struct at_xdmac_desc *desc;
    unsigned long flags;
    size_t ublen;
    u32 dwidth;
    /*
     * WARNING: The channel configuration is set here since there is no
     * dmaengine_slave_config call in this case. Moreover, we don't know
     * the direction, which means we can't dynamically set the source and
     * dest interfaces, so we have to use the same one for both. Only
     * interface 0 allows EBI access. Fortunately, we can access DDR
     * through both ports (at least on SAMA5D4x), so using the same
     * interface for source and dest works around not knowing the
     * direction.
     * ERRATA: Even though it is useless for memory transfers, the PERID
     * must not match that of another channel. Otherwise, it could lead to
     * spurious flag status.
     */
    u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
                  | AT_XDMAC_CC_DAM_UBS_AM
                  | AT_XDMAC_CC_SAM_INCREMENTED_AM
                  | AT_XDMAC_CC_DIF(0)
                  | AT_XDMAC_CC_SIF(0)
                  | AT_XDMAC_CC_MBSIZE_SIXTEEN
                  | AT_XDMAC_CC_MEMSET_HW_MODE
                  | AT_XDMAC_CC_TYPE_MEM_TRAN;

    dwidth = at_xdmac_align_width(chan, dst_addr);

    if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
        dev_err(chan2dev(chan),
                "%s: Transfer too large, aborting...\n",
                __func__);
        return NULL;
    }

    spin_lock_irqsave(&atchan->lock, flags);
    desc = at_xdmac_get_desc(atchan);
    spin_unlock_irqrestore(&atchan->lock, flags);
    if (!desc) {
        dev_err(chan2dev(chan), "can't get descriptor\n");
        return NULL;
    }

    chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

    ublen = len >> dwidth;

    desc->lld.mbr_da = dst_addr;
    desc->lld.mbr_ds = value;
    desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
                        | AT_XDMAC_MBR_UBC_NDEN
                        | AT_XDMAC_MBR_UBC_NSEN
                        | ublen;
    desc->lld.mbr_cfg = chan_cc;

    dev_dbg(chan2dev(chan),
            "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
            __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
            desc->lld.mbr_cfg);

    return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
                         size_t len, unsigned long flags)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
    struct at_xdmac_desc *desc;

    dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
            __func__, &dest, len, value, flags);

    if (unlikely(!len))
        return NULL;

    desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
    if (!desc)
        return NULL;
    list_add_tail(&desc->desc_node, &desc->descs_list);

    desc->tx_dma_desc.cookie = -EBUSY;
    desc->tx_dma_desc.flags = flags;
    desc->xfer_size = len;

    return &desc->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
                            unsigned int sg_len, int value,
                            unsigned long flags)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
    struct at_xdmac_desc *desc, *pdesc = NULL,
                         *ppdesc = NULL, *first = NULL;
    struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
    size_t stride = 0, pstride = 0, len = 0;
    int i;

    if (!sgl)
        return NULL;

    dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
            __func__, sg_len, value, flags);

    /* Prepare descriptors. */
    for_each_sg(sgl, sg, sg_len, i) {
        dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
                __func__, &sg_dma_address(sg), sg_dma_len(sg),
                value, flags);
        desc = at_xdmac_memset_create_desc(chan, atchan,
                                           sg_dma_address(sg),
                                           sg_dma_len(sg),
                                           value);
        if (!desc) {
            if (first)
                list_splice_init(&first->descs_list,
                                 &atchan->free_descs_list);
            return NULL;
        }

        if (!first)
            first = desc;

        /* Update our strides */
        pstride = stride;
        if (psg)
            stride = sg_dma_address(sg) -
                     (sg_dma_address(psg) + sg_dma_len(psg));

        /*
         * The scatterlist API gives us only the address and
         * length of each element.
         *
         * Unfortunately, we don't have the stride, which we
         * will need to compute.
         *
         * That makes us end up in a situation like this one:
         *    len    stride   len    stride   len
         * +-------+        +-------+        +-------+
         * |  N-2  |        |  N-1  |        |   N   |
         * +-------+        +-------+        +-------+
         *
         * We need all three of these elements (N-2, N-1 and N)
         * to actually decide whether we need to queue N-1 or
         * reuse N-2.
         *
         * We will only consider N if it is the last element.
         */
        if (ppdesc && pdesc) {
            if ((stride == pstride) &&
                (sg_dma_len(ppsg) == sg_dma_len(psg))) {
                dev_dbg(chan2dev(chan),
                        "%s: desc 0x%p can be merged with desc 0x%p\n",
                        __func__, pdesc, ppdesc);

                /*
                 * Increment the block count of the
                 * N-2 descriptor
                 */
                at_xdmac_increment_block_count(chan, ppdesc);
                ppdesc->lld.mbr_dus = stride;

                /*
                 * Put back the N-1 descriptor in the
                 * free descriptor list
                 */
                list_add_tail(&pdesc->desc_node,
                              &atchan->free_descs_list);

                /*
                 * Make our N-1 descriptor pointer
                 * point to the N-2 since they were
                 * actually merged.
                 */
                pdesc = ppdesc;

            /*
             * Rule out the case where we don't have
             * pstride computed yet (our second sg
             * element).
             *
             * We also want to catch the case where there
             * would be a negative stride.
             */
            } else if (pstride ||
                       sg_dma_address(sg) < sg_dma_address(psg)) {
                /*
                 * Queue the N-1 descriptor after the
                 * N-2
                 */
                at_xdmac_queue_desc(chan, ppdesc, pdesc);

                /*
                 * Add the N-1 descriptor to the list
                 * of the descriptors used for this
                 * transfer
                 */
                list_add_tail(&desc->desc_node,
                              &first->descs_list);
                dev_dbg(chan2dev(chan),
                        "%s: add desc 0x%p to descs_list 0x%p\n",
                        __func__, desc, first);
            }
        }

        /*
         * If we are the last element, just check whether it has
         * the same size as the previous element.
         *
         * If so, we can merge it with the previous descriptor
         * since we don't care about the stride anymore.
         */
        if ((i == (sg_len - 1)) &&
            sg_dma_len(psg) == sg_dma_len(sg)) {
            dev_dbg(chan2dev(chan),
                    "%s: desc 0x%p can be merged with desc 0x%p\n",
                    __func__, desc, pdesc);

            /*
             * Increment the block count of the N-1
             * descriptor
             */
            at_xdmac_increment_block_count(chan, pdesc);
            pdesc->lld.mbr_dus = stride;

            /*
             * Put back the N descriptor in the free
             * descriptor list
             */
            list_add_tail(&desc->desc_node,
                          &atchan->free_descs_list);
        }

        /* Update our descriptors */
        ppdesc = pdesc;
        pdesc = desc;

        /* Update our scatter pointers */
        ppsg = psg;
        psg = sg;

        len += sg_dma_len(sg);
    }

    first->tx_dma_desc.cookie = -EBUSY;
    first->tx_dma_desc.flags = flags;
    first->xfer_size = len;

    return &first->tx_dma_desc;
}

static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                   struct dma_tx_state *txstate)
{
    struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
    struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
    struct at_xdmac_desc *desc, *_desc;
    struct list_head *descs_list;
    enum dma_status ret;
    int residue, retry;
    u32 cur_nda, check_nda, cur_ubc, mask, value;
    u8 dwidth = 0;
    unsigned long flags;
    bool initd;

    ret = dma_cookie_status(chan, cookie, txstate);
    if (ret == DMA_COMPLETE)
        return ret;

    if (!txstate)
        return ret;

    spin_lock_irqsave(&atchan->lock, flags);

    desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

    /*
     * If the transfer has not been started yet, we don't need to compute
     * the residue: it is simply the transfer length.
     */
    if (!desc->active_xfer) {
        dma_set_residue(txstate, desc->xfer_size);
        goto spin_unlock;
    }

    residue = desc->xfer_size;
    /*
     * Flush FIFO: only relevant when the transfer is source peripheral
     * synchronized. The flush is needed before reading CUBC because data
     * in the FIFO is not reported by CUBC. Reporting a residue equal to
     * the transfer length while data remains in the FIFO can cause issues.
     * Use case: the Atmel USART has a timeout which fires when characters
     * have been received but no new character has arrived for a while. On
     * timeout, it requests the residue. If the data sits in the DMA FIFO,
     * we would return a residue equal to the transfer length, meaning no
     * data was received. An application waiting for that data would then
     * hang, since there will be no further USART timeout without new data.
     */
    mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
    value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
    if ((desc->lld.mbr_cfg & mask) == value) {
        at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
        while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
            cpu_relax();
    }

    /*
     * The easiest way to compute the residue would be to pause the DMA,
     * but doing so can lead to losing data, as some devices don't have a
     * FIFO.
     * We need to read several registers because:
     * - the DMA is running, therefore a descriptor change is possible
     *   while reading these registers
     * - when a block transfer is done, the value of the CUBC register is
     *   set to its initial value until the fetch of the next descriptor.
     *   That value would corrupt the residue calculation, so we have to
     *   skip it.
     *
     * INITD --------                    ------------
     *              |____________________|
     *        _______________________  _______________
     * NDA          @desc2           \/     @desc3
     *        _______________________/\_______________
     *        __________  ___________  _______________
     * CUBC        0    \/ MAX desc1 \/    MAX desc2
     *        __________/\___________/\_______________
     *
     * Since descriptors are aligned on 64 bits, we can assume that
     * the update of NDA and CUBC is atomic.
     * Memory barriers are used to ensure the read order of the registers.
     * A max number of retries is set because, although unlikely, the loop
     * could otherwise never end.
     */
    for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
        check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
        rmb();
        cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
        rmb();
        initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
        rmb();
        cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
        rmb();

        if ((check_nda == cur_nda) && initd)
            break;
    }

    if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
        ret = DMA_ERROR;
        goto spin_unlock;
    }

    /*
     * Flush FIFO: only relevant when the transfer is source peripheral
     * synchronized. Another flush is needed here because CUBC is updated
     * when the controller sends the data write command. This can lead to
     * reporting data that has not yet been written to memory or to the
     * device. The FIFO flush ensures the data has really been written.
     */
    if ((desc->lld.mbr_cfg & mask) == value) {
        at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
        while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
            cpu_relax();
    }

    /*
     * Remove the size of all microblocks already transferred, including
     * the current one. Then add back the size still to be transferred in
     * the current microblock.
     */
    descs_list = &desc->descs_list;
    list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
        dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
        residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
        if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
            break;
    }
    residue += cur_ubc << dwidth;
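    /*
     * Numeric example: with word accesses (dwidth = 2), a descriptor whose
     * mbr_ubc encodes 0x100 microblock units accounts for 0x100 << 2 =
     * 1024 bytes, and cur_ubc = 0x10 adds back 0x10 << 2 = 64 bytes still
     * pending in the current microblock.
     */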

    dma_set_residue(txstate, residue);

    dev_dbg(chan2dev(chan),
            "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
            __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

spin_unlock:
    spin_unlock_irqrestore(&atchan->lock, flags);
    return ret;
}

/* Call must be protected by lock. */
static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
                                 struct at_xdmac_desc *desc)
{
    dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);

    /*
     * Remove the transfer from the transfer list then move the transfer
     * descriptors into the free descriptors list.
     */
    list_del(&desc->xfer_node);
    list_splice_init(&desc->descs_list, &atchan->free_descs_list);
}

static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
    struct at_xdmac_desc *desc;
    unsigned long flags;

    spin_lock_irqsave(&atchan->lock, flags);

    /*
     * If the channel is enabled, do nothing: advance_work will be
     * triggered after the interrupt.
     */
1554 if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
1555 desc = list_first_entry(&atchan->xfers_list,
1556 struct at_xdmac_desc,
1557 xfer_node);
1558 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1559 if (!desc->active_xfer)
1560 at_xdmac_start_xfer(atchan, desc);
1561 }
1562
1563 spin_unlock_irqrestore(&atchan->lock, flags);
1564}
1565
1566static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1567{
1568 struct at_xdmac_desc *desc;
1569 struct dma_async_tx_descriptor *txd;
1570
1571 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1572 txd = &desc->tx_dma_desc;
1573
1574 if (txd->flags & DMA_PREP_INTERRUPT)
1575 dmaengine_desc_get_callback_invoke(txd, NULL);
1576}
1577
1578static void at_xdmac_tasklet(unsigned long data)
1579{
1580 struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
1581 struct at_xdmac_desc *desc;
1582 u32 error_mask;
1583
1584 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1585 __func__, atchan->irq_status);
1586
1587 error_mask = AT_XDMAC_CIS_RBEIS
1588 | AT_XDMAC_CIS_WBEIS
1589 | AT_XDMAC_CIS_ROIS;
1590
1591 if (at_xdmac_chan_is_cyclic(atchan)) {
1592 at_xdmac_handle_cyclic(atchan);
1593 } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
1594 || (atchan->irq_status & error_mask)) {
1595 struct dma_async_tx_descriptor *txd;
1596
1597 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1598 dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1599 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1600 dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1601 if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1602 dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1603
1604 spin_lock(&atchan->lock);
1605 desc = list_first_entry(&atchan->xfers_list,
1606 struct at_xdmac_desc,
1607 xfer_node);
1608 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1609 BUG_ON(!desc->active_xfer);
1610
1611 txd = &desc->tx_dma_desc;
1612
1613 at_xdmac_remove_xfer(atchan, desc);
1614 spin_unlock(&atchan->lock);
1615
1616 if (!at_xdmac_chan_is_cyclic(atchan)) {
1617 dma_cookie_complete(txd);
1618 if (txd->flags & DMA_PREP_INTERRUPT)
1619 dmaengine_desc_get_callback_invoke(txd, NULL);
1620 }
1621
1622 dma_run_dependencies(txd);
1623
1624 at_xdmac_advance_work(atchan);
1625 }
1626}

static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan *atchan;
	u32 imr, status, pending;
	u32 chan_imr, chan_status;
	int i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->irq_status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}
	} while (pending);

	return ret;
}

static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	if (!at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_advance_work(atchan);
}
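
/*
 * Illustrative client-side sketch, NOT part of this driver: the standard
 * submit-then-issue sequence that lands in at_xdmac_issue_pending() above.
 * The completion callback runs from the channel tasklet after the
 * end-of-linked-list interrupt. All example_* names are hypothetical, and
 * <linux/completion.h> is assumed to be available.
 */
static void example_memcpy_done(void *arg)
{
	complete((struct completion *)arg);
}

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct completion done;

	init_completion(&done);

	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = example_memcpy_done;
	txd->callback_param = &done;
	dmaengine_submit(txd);

	/* Nothing moves until the client issues the pending descriptors. */
	dma_async_issue_pending(chan);

	wait_for_completion(&done);
	return 0;
}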

static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	int ret;
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return ret;
}
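
/*
 * Illustrative client-side sketch, NOT part of this driver: a peripheral
 * driver configures the channel before preparing slave transfers, and the
 * call is routed to at_xdmac_device_config() above. The FIFO address, bus
 * width and burst size are hypothetical.
 */
static int example_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 1,
	};

	return dmaengine_slave_config(chan, &cfg);
}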

static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	if (!at_xdmac_chan_is_paused(atchan)) {
		spin_unlock_irqrestore(&atchan->lock, flags);
		return 0;
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
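
/*
 * Illustrative client-side sketch, NOT part of this driver: pausing and
 * resuming a channel from a peripheral driver maps onto the two callbacks
 * above through the dmaengine core helpers.
 */
static void example_pause_resume(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* -> at_xdmac_device_pause() */
	dmaengine_resume(chan);		/* -> at_xdmac_device_resume() */
}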

static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc *desc, *_desc;
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
		at_xdmac_remove_xfer(atchan, desc);

	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
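
/*
 * Illustrative client-side sketch, NOT part of this driver: aborting all
 * queued work on a channel ends up in at_xdmac_device_terminate_all() above.
 * Note that completion callbacks of the cancelled descriptors never run.
 */
static void example_abort(struct dma_chan *chan)
{
	dmaengine_terminate_all(chan);
}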

static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		i = -EIO;
		goto spin_unlock;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		i = -EIO;
		goto spin_unlock;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
		if (!desc) {
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	return i;
}
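
/*
 * Illustrative client-side sketch, NOT part of this driver: requesting a
 * channel through the dmaengine core is what triggers the resource
 * allocation above. "tx" names a hypothetical entry in the client's
 * "dmas"/"dma-names" device tree properties.
 */
static struct dma_chan *example_get_chan(struct device *dev)
{
	return dma_request_chan(dev, "tx");
}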

static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}
}

#ifdef CONFIG_PM
static int atmel_xdmac_prepare(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
#else
#	define atmel_xdmac_prepare NULL
#endif

#ifdef CONFIG_PM_SLEEP
static int atmel_xdmac_suspend(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause(chan);
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);

	at_xdmac_off(atxdmac);
	clk_disable_unprepare(atxdmac->clk);
	return 0;
}

static int atmel_xdmac_resume(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan, *_chan;
	int i;
	int ret;

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret)
		return ret;

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_resume(chan);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			wmb();
			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static int at_xdmac_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct at_xdmac *atxdmac;
	int irq, size, nr_channels, i, ret;
	void __iomem *base;
	u32 reg;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Read the number of xdmac channels. The read helper can't be used
	 * here since atxdmac is not yet allocated, and we need the channel
	 * count to size that allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}
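
	/*
	 * Worked example with a hypothetical register value: if
	 * AT_XDMAC_GTYPE reads 0x003F400F, then AT_XDMAC_NB_CH() yields
	 * (0xF + 1) = 16 channels, AT_XDMAC_FIFO_SZ() yields
	 * ((0x003F400F >> 5) & 0x7FF) = 512 bytes of FIFO, and
	 * AT_XDMAC_NB_REQ() yields (0x3F + 1) = 64 peripheral requests.
	 */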

	size = sizeof(*atxdmac);
	size += nr_channels * sizeof(struct at_xdmac_chan);
	atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use devm resources here to prevent races with the tasklet. */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				 sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}

	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel: the second allocation fails in private_candidate.
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev = &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status = at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
	atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config = at_xdmac_device_config;
	atxdmac->dma.device_pause = at_xdmac_device_pause;
	atxdmac->dma.device_resume = at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/* Disable all channels and interrupts. */
	at_xdmac_off(atxdmac);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
			     (unsigned long)atchan);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}
	platform_set_drvdata(pdev, atxdmac);

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register OF DMA controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac);
	return ret;
}

static int at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int i;

	at_xdmac_off(atxdmac);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	clk_disable_unprepare(atxdmac->clk);

	free_irq(atxdmac->irq, atxdmac);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}

	return 0;
}

static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
	.prepare = atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
};

static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
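
/*
 * Illustrative device tree node matching the "atmel,sama5d4-dma" compatible
 * above. The unit address, register size, interrupt specifier and clock
 * phandle are hypothetical; "dma_clk" matches the devm_clk_get() call in
 * at_xdmac_probe():
 *
 *	dma1: dma-controller@f0004000 {
 *		compatible = "atmel,sama5d4-dma";
 *		reg = <0xf0004000 0x200>;
 *		interrupts = <50 IRQ_TYPE_LEVEL_HIGH 0>;
 *		#dma-cells = <1>;
 *		clocks = <&dma1_clk>;
 *		clock-names = "dma_clk";
 *	};
 */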

static struct platform_driver at_xdmac_driver = {
	.probe = at_xdmac_probe,
	.remove = at_xdmac_remove,
	.driver = {
		.name = "at_xdmac",
		.of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
		.pm = &atmel_xdmac_dev_pm_ops,
	}
};

static int __init at_xdmac_init(void)
{
	return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
}
subsys_initcall(at_xdmac_init);

MODULE_DESCRIPTION("Atmel Extensible DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");
