// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <liuwei@actions-semi.com>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "virt-dma.h"

#define OWL_DMA_FRAME_MAX_LENGTH		0xfffff

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0				0x00
#define OWL_DMA_IRQ_PD1				0x04
#define OWL_DMA_IRQ_PD2				0x08
#define OWL_DMA_IRQ_PD3				0x0C
#define OWL_DMA_IRQ_EN0				0x10
#define OWL_DMA_IRQ_EN1				0x14
#define OWL_DMA_IRQ_EN2				0x18
#define OWL_DMA_IRQ_EN3				0x1C
#define OWL_DMA_SECURE_ACCESS_CTL		0x20
#define OWL_DMA_NIC_QOS				0x24
#define OWL_DMA_DBGSEL				0x28
#define OWL_DMA_IDLE_STAT			0x2C

/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i)			(0x100 + (i) * 0x100)
#define OWL_DMAX_MODE				0x00
#define OWL_DMAX_SOURCE				0x04
#define OWL_DMAX_DESTINATION			0x08
#define OWL_DMAX_FRAME_LEN			0x0C
#define OWL_DMAX_FRAME_CNT			0x10
#define OWL_DMAX_REMAIN_FRAME_CNT		0x14
#define OWL_DMAX_REMAIN_CNT			0x18
#define OWL_DMAX_SOURCE_STRIDE			0x1C
#define OWL_DMAX_DESTINATION_STRIDE		0x20
#define OWL_DMAX_START				0x24
#define OWL_DMAX_PAUSE				0x28
#define OWL_DMAX_CHAINED_CTL			0x2C
#define OWL_DMAX_CONSTANT			0x30
#define OWL_DMAX_LINKLIST_CTL			0x34
#define OWL_DMAX_NEXT_DESCRIPTOR		0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM		0x3C
#define OWL_DMAX_INT_CTL			0x40
#define OWL_DMAX_INT_STATUS			0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER		0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER	0x4C

/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x)			(((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x)			(((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_MODE_ST_DEV			OWL_DMA_MODE_ST(0)
#define OWL_DMA_MODE_ST_DCU			OWL_DMA_MODE_ST(2)
#define OWL_DMA_MODE_ST_SRAM			OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x)			(((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_MODE_DT_DEV			OWL_DMA_MODE_DT(0)
#define OWL_DMA_MODE_DT_DCU			OWL_DMA_MODE_DT(2)
#define OWL_DMA_MODE_DT_SRAM			OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x)			(((x) & GENMASK(1, 0)) << 16)
#define OWL_DMA_MODE_SAM_CONST			OWL_DMA_MODE_SAM(0)
#define OWL_DMA_MODE_SAM_INC			OWL_DMA_MODE_SAM(1)
#define OWL_DMA_MODE_SAM_STRIDE			OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x)			(((x) & GENMASK(1, 0)) << 18)
#define OWL_DMA_MODE_DAM_CONST			OWL_DMA_MODE_DAM(0)
#define OWL_DMA_MODE_DAM_INC			OWL_DMA_MODE_DAM(1)
#define OWL_DMA_MODE_DAM_STRIDE			OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x)			(((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB				BIT(23)
#define OWL_DMA_MODE_NDDBW(x)			(((x) & 0x1) << 28)
#define OWL_DMA_MODE_NDDBW_32BIT		OWL_DMA_MODE_NDDBW(0)
#define OWL_DMA_MODE_NDDBW_8BIT			OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE			BIT(29)
#define OWL_DMA_MODE_LME			BIT(30)
#define OWL_DMA_MODE_CME			BIT(31)

/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x)			(((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_LLC_SAV_INC			OWL_DMA_LLC_SAV(0)
#define OWL_DMA_LLC_SAV_LOAD_NEXT		OWL_DMA_LLC_SAV(1)
#define OWL_DMA_LLC_SAV_LOAD_PREV		OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x)			(((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_LLC_DAV_INC			OWL_DMA_LLC_DAV(0)
#define OWL_DMA_LLC_DAV_LOAD_NEXT		OWL_DMA_LLC_DAV(1)
#define OWL_DMA_LLC_DAV_LOAD_PREV		OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND			BIT(16)

/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK			BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTCTL_FRAME			BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME		BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME		BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK			BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTSTAT_FRAME			BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME		BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME		BIT(4)

/* Extract a 'width'-bit field at 'shift' in 'val' and place it at 'newshift' */
#define BIT_FIELD(val, width, shift, newshift)	\
		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
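
/*
 * For example, BIT_FIELD(mode, 8, 16, 20) extracts the 8-bit field at
 * bits [23:16] of 'mode' and places it at bits [27:20] of the result.
 */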

/* Frame count value is fixed as 1 */
#define FCNT_VAL				0x1

/**
 * enum owl_dmadesc_offsets - Describe the layout of the hardware link-list
 * descriptor used for DMA transfers
 * @OWL_DMADESC_NEXT_LLI: physical address of the next link-list descriptor
 * @OWL_DMADESC_SADDR: source physical address
 * @OWL_DMADESC_DADDR: destination physical address
 * @OWL_DMADESC_FLEN: frame length
 * @OWL_DMADESC_SRC_STRIDE: source stride
 * @OWL_DMADESC_DST_STRIDE: destination stride
 * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config
 * @OWL_DMADESC_CTRLB: interrupt config
 * @OWL_DMADESC_CONST_NUM: data for constant fill
 * @OWL_DMADESC_SIZE: number of 32-bit words in the descriptor
 */
enum owl_dmadesc_offsets {
	OWL_DMADESC_NEXT_LLI = 0,
	OWL_DMADESC_SADDR,
	OWL_DMADESC_DADDR,
	OWL_DMADESC_FLEN,
	OWL_DMADESC_SRC_STRIDE,
	OWL_DMADESC_DST_STRIDE,
	OWL_DMADESC_CTRLA,
	OWL_DMADESC_CTRLB,
	OWL_DMADESC_CONST_NUM,
	OWL_DMADESC_SIZE
};

enum owl_dma_id {
	S900_DMA,
	S700_DMA,
};

/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
	u32			hw[OWL_DMADESC_SIZE];
	dma_addr_t		phys;
	struct list_head	node;
};

/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 * @cyclic: flag to indicate cyclic transfers
 */
struct owl_dma_txd {
	struct virt_dma_desc	vd;
	struct list_head	lli_list;
	bool			cyclic;
};

/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index to this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 */
struct owl_dma_pchan {
	u32			id;
	void __iomem		*base;
	struct owl_dma_vchan	*vchan;
};

/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 * @cfg: slave configuration for this channel
 * @drq: physical DMA request ID for this channel
 */
struct owl_dma_vchan {
	struct virt_dma_chan	vc;
	struct owl_dma_pchan	*pchan;
	struct owl_dma_txd	*txd;
	struct dma_slave_config cfg;
	u8			drq;
};

/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing the DMA controller global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt ID for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 * @devid: device id based on OWL SoC
 */
struct owl_dma {
	struct dma_device	dma;
	void __iomem		*base;
	struct clk		*clk;
	spinlock_t		lock;
	struct dma_pool		*lli_pool;
	int			irq;

	unsigned int		nr_pchans;
	struct owl_dma_pchan	*pchans;

	unsigned int		nr_vchans;
	struct owl_dma_vchan	*vchans;
	enum owl_dma_id		devid;
};

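/*
 * Read-modify-write helper: set (state == true) or clear (state == false)
 * the 'val' bits of a channel register.
 */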
static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
			 u32 val, bool state)
{
	u32 regval;

	regval = readl(pchan->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, pchan->base + reg);
}

static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
	writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
	return readl(pchan->base + reg);
}

static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
	u32 regval;

	regval = readl(od->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, od->base + reg);
}

static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
	writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
	return readl(od->base + reg);
}

static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
	return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct owl_dma_txd, vd.tx);
}

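/*
 * Repack the relevant OWL_DMAX_MODE and OWL_DMAX_LINKLIST_CTL register
 * fields into the denser CTRLA word used by the hardware link-list
 * descriptor.
 */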
static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(mode, 4, 28, 28) |
	      BIT_FIELD(mode, 8, 16, 20) |
	      BIT_FIELD(mode, 4, 8, 16) |
	      BIT_FIELD(mode, 6, 0, 10) |
	      BIT_FIELD(llc_ctl, 2, 10, 8) |
	      BIT_FIELD(llc_ctl, 2, 8, 6);

	return ctl;
}

static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
	u32 ctl;

	/*
	 * Irrespective of the SoC, ctrlb value starts filling from
	 * bit 18.
	 */
	ctl = BIT_FIELD(int_ctl, 7, 0, 18);

	return ctl;
}

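/* The frame length lives in the low 20 bits of the FLEN descriptor word */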
static u32 llc_hw_flen(struct owl_dma_lli *lli)
{
	return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0);
}

static void owl_dma_free_lli(struct owl_dma *od,
			     struct owl_dma_lli *lli)
{
	list_del(&lli->node);
	dma_pool_free(od->lli_pool, lli, lli->phys);
}

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
	struct owl_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
	if (!lli)
		return NULL;

	INIT_LIST_HEAD(&lli->node);
	lli->phys = phys;

	return lli;
}

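/*
 * Chain 'next' behind 'prev': queue it on the txd's lli_list (unless it
 * closes a cyclic transfer), point prev's NEXT_LLI word at its physical
 * address and set the link-list mode enable (LME) bit in prev's CTRLA.
 */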
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
					   struct owl_dma_lli *prev,
					   struct owl_dma_lli *next,
					   bool is_cyclic)
{
	if (!is_cyclic)
		list_add_tail(&next->node, &txd->lli_list);

	if (prev) {
		prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;
		prev->hw[OWL_DMADESC_CTRLA] |=
					llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
	}

	return next;
}

static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
				  struct owl_dma_lli *lli,
				  dma_addr_t src, dma_addr_t dst,
				  u32 len, enum dma_transfer_direction dir,
				  struct dma_slave_config *sconfig,
				  bool is_cyclic)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	u32 mode, ctrlb;

	mode = OWL_DMA_MODE_PW(0);

	switch (dir) {
	case DMA_MEM_TO_MEM:
		mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
			OWL_DMA_MODE_DAM_INC;

		break;
	case DMA_MEM_TO_DEV:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	case DMA_DEV_TO_MEM:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	default:
		return -EINVAL;
	}

	lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode,
						  OWL_DMA_LLC_SAV_LOAD_NEXT |
						  OWL_DMA_LLC_DAV_LOAD_NEXT);

	if (is_cyclic)
		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
	else
		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

	lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */
	lli->hw[OWL_DMADESC_SADDR] = src;
	lli->hw[OWL_DMADESC_DADDR] = dst;
	lli->hw[OWL_DMADESC_SRC_STRIDE] = 0;
	lli->hw[OWL_DMADESC_DST_STRIDE] = 0;

	if (od->devid == S700_DMA) {
		/* Max frame length is 1MB */
		lli->hw[OWL_DMADESC_FLEN] = len;
		/*
		 * On S700, the word at offset 0x1C is shared between the
		 * frame count and ctrlb: the first 12 bits hold the frame
		 * count and the remaining 20 bits hold ctrlb.
		 */
		lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
	} else {
		/*
		 * On S900, the word at offset 0xC is shared between the
		 * frame length (max frame length is 1MB) and the frame
		 * count: the first 20 bits hold the frame length and the
		 * remaining 12 bits hold the frame count.
		 */
		lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
		lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
	}

	return 0;
}

static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
					       struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];

		spin_lock_irqsave(&od->lock, flags);
		if (!pchan->vchan) {
			pchan->vchan = vchan;
			spin_unlock_irqrestore(&od->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&od->lock, flags);
	}

	return pchan;
}

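/* OWL_DMA_IDLE_STAT has one bit per pchan; a cleared bit means busy */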
static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
	unsigned int val;

	val = dma_readl(od, OWL_DMA_IDLE_STAT);

	return !(val & (1 << pchan->id));
}

static void owl_dma_terminate_pchan(struct owl_dma *od,
				    struct owl_dma_pchan *pchan)
{
	unsigned long flags;
	u32 irq_pd;

	pchan_writel(pchan, OWL_DMAX_START, 0);
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);
	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

	irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
	if (irq_pd & (1 << pchan->id)) {
		dev_warn(od->dma.dev,
			 "terminating pchan %d that still has pending irq\n",
			 pchan->id);
		dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
	}

	pchan->vchan = NULL;

	spin_unlock_irqrestore(&od->lock, flags);
}

static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 1);
}

static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 0);
}

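/*
 * Hand the next queued descriptor to the physical channel: point the
 * channel at the first hardware link-list node, pick the completion
 * interrupt matching the transfer type and kick off the transfer.
 */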
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
	struct owl_dma_pchan *pchan = vchan->pchan;
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
	struct owl_dma_lli *lli;
	unsigned long flags;
	u32 int_ctl;

	list_del(&vd->node);

	vchan->txd = txd;

	/* Wait for channel inactive */
	while (owl_dma_pchan_busy(od, pchan))
		cpu_relax();

	lli = list_first_entry(&txd->lli_list,
			       struct owl_dma_lli, node);

	if (txd->cyclic)
		int_ctl = OWL_DMA_INTCTL_BLOCK;
	else
		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
		     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
	pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
	pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

	/* Clear IRQ status for this pchan */
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);

	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

	spin_unlock_irqrestore(&od->lock, flags);

	dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

	/* Start DMA transfer for this pchan */
	pchan_writel(pchan, OWL_DMAX_START, 0x1);

	return 0;
}

static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
	/* Ensure that the physical channel is stopped */
	owl_dma_terminate_pchan(od, vchan->pchan);

	vchan->pchan = NULL;
}

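/*
 * Single shared handler for the controller IRQ: acknowledge all pending
 * pchans, re-check each channel for an interrupt that raced with the
 * acknowledge, then complete the active descriptor on every signalled
 * vchan and either start its next descriptor or release the pchan.
 */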
static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
	struct owl_dma *od = dev_id;
	struct owl_dma_vchan *vchan;
	struct owl_dma_pchan *pchan;
	unsigned long pending;
	int i;
	unsigned int global_irq_pending, chan_irq_pending;

	spin_lock(&od->lock);

	pending = dma_readl(od, OWL_DMA_IRQ_PD0);

	/* Clear IRQ status for each pchan */
	for_each_set_bit(i, &pending, od->nr_pchans) {
		pchan = &od->pchans[i];
		pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
	}

	/* Clear pending IRQ */
	dma_writel(od, OWL_DMA_IRQ_PD0, pending);

	/* Check missed pending IRQ */
	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];
		chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
				   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

		/* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
		dma_readl(od, OWL_DMA_IRQ_PD0);

		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
			dev_dbg(od->dma.dev,
				"global and channel IRQ pending match err\n");

			/* Clear IRQ status for this pchan */
			pchan_update(pchan, OWL_DMAX_INT_STATUS,
				     0xff, false);

			/* Update global IRQ pending */
			pending |= BIT(i);
		}
	}

	spin_unlock(&od->lock);

	for_each_set_bit(i, &pending, od->nr_pchans) {
		struct owl_dma_txd *txd;

		pchan = &od->pchans[i];

		vchan = pchan->vchan;
		if (!vchan) {
			dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
				 pchan->id);
			continue;
		}

		spin_lock(&vchan->vc.lock);

		txd = vchan->txd;
		if (txd) {
			vchan->txd = NULL;

			vchan_cookie_complete(&txd->vd);

			/*
			 * Start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&vchan->vc))
				owl_dma_start_next_txd(vchan);
			else
				owl_dma_phy_free(od, vchan);
		}

		spin_unlock(&vchan->vc.lock);
	}

	return IRQ_HANDLED;
}

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
	struct owl_dma_lli *lli, *_lli;

	if (unlikely(!txd))
		return;

	list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
		owl_dma_free_lli(od, lli);

	kfree(txd);
}

static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
	struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

	owl_dma_free_txd(od, txd);
}

static int owl_dma_terminate_all(struct dma_chan *chan)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->pchan)
		owl_dma_phy_free(od, vchan);

	if (vchan->txd) {
		owl_dma_desc_free(&vchan->txd->vd);
		vchan->txd = NULL;
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int owl_dma_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));

	return 0;
}

static int owl_dma_pause(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_pause_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int owl_dma_resume(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	if (!vchan->pchan && !vchan->txd)
		return 0;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_resume_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli;
	unsigned int next_lli_phy;
	size_t bytes;

	pchan = vchan->pchan;
	txd = vchan->txd;

	if (!pchan || !txd)
		return 0;

	/* Get remain count of current node in link list */
	bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

	/* Loop through the preceding nodes to get total remaining bytes */
	if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
		next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
		list_for_each_entry(lli, &txd->lli_list, node) {
			/* Start from the next active node */
			if (lli->phys == next_lli_phy) {
				list_for_each_entry(lli, &txd->lli_list, node)
					bytes += llc_hw_flen(lli);
				break;
			}
		}
	}

	return bytes;
}

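/*
 * Residue is reported at burst granularity: a descriptor still sitting on
 * the issued list reports its full length, while the in-flight descriptor
 * reports the remaining byte count read back from the hardware.
 */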
static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct owl_dma_txd *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_owl_txd(&vd->tx);
		list_for_each_entry(lli, &txd->lli_list, node)
			bytes += llc_hw_flen(lli);
	} else {
		bytes = owl_dma_getbytes_chan(vchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}

static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct owl_dma_pchan *pchan;

	pchan = owl_dma_get_pchan(od, vchan);
	if (!pchan)
		return;

	dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);

	vchan->pchan = pchan;
	owl_dma_start_next_txd(vchan);
}

static void owl_dma_issue_pending(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	if (vchan_issue_pending(&vchan->vc)) {
		if (!vchan->pchan)
			owl_dma_phy_alloc_and_start(vchan);
	}
	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_memcpy(struct dma_chan *chan,
				     dma_addr_t dst, dma_addr_t src,
				     size_t len, unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	size_t offset, bytes;
	int ret;

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	/* Process the transfer as frame by frame */
	for (offset = 0; offset < len; offset += bytes) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);

		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
				      bytes, DMA_MEM_TO_MEM,
				      &vchan->cfg, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);
	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_slave_sg(struct dma_chan *chan,
				       struct scatterlist *sgl,
				       unsigned int sg_len,
				       enum dma_transfer_direction dir,
				       unsigned long flags, void *context)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	size_t len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
			dev_err(od->dma.dev,
				"frame length exceeds max supported length");
			goto err_txd_free;
		}

		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_err(chan2dev(chan), "failed to allocate lli");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = sconfig->dst_addr;
		} else {
			src = sconfig->src_addr;
			dst = addr;
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
				      txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_prep_dma_cyclic(struct dma_chan *chan,
				     dma_addr_t buf_addr, size_t buf_len,
				     size_t period_len,
				     enum dma_transfer_direction dir,
				     unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
	dma_addr_t src = 0, dst = 0;
	unsigned int periods = buf_len / period_len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);
	txd->cyclic = true;

	for (i = 0; i < periods; i++) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = buf_addr + (period_len * i);
			dst = sconfig->dst_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = sconfig->src_addr;
			dst = buf_addr + (period_len * i);
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
				      dir, sconfig, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli");
			goto err_txd_free;
		}

		if (!first)
			first = lli;

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	/* close the cyclic list */
	owl_dma_add_lli(txd, prev, first, true);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(&vchan->vc);
}

static inline void owl_dma_free(struct owl_dma *od)
{
	struct owl_dma_vchan *vchan = NULL;
	struct owl_dma_vchan *next;

	list_for_each_entry_safe(vchan,
				 next, &od->dma.channels, vc.chan.device_node) {
		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}

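/*
 * Translate a device-tree DMA specifier into a channel: the single cell
 * selects the DMA request (drq) line, e.g. (illustrative binding snippet)
 * "dmas = <&dma 20>;" would route request line 20 to the returned channel.
 */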
static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct owl_dma *od = ofdma->of_dma_data;
	struct owl_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 drq = dma_spec->args[0];

	if (drq > od->nr_vchans)
		return NULL;

	chan = dma_get_any_slave_channel(&od->dma);
	if (!chan)
		return NULL;

	vchan = to_owl_vchan(chan);
	vchan->drq = drq;

	return chan;
}

static const struct of_device_id owl_dma_match[] = {
	{ .compatible = "actions,s500-dma", .data = (void *)S900_DMA,},
	{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, owl_dma_match);

static int owl_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct owl_dma *od;
	int ret, i, nr_channels, nr_requests;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	ret = of_property_read_u32(np, "dma-channels", &nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-channels\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &nr_requests);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-requests\n");
		return ret;
	}

	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
		 nr_channels, nr_requests);

	od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);

	od->nr_pchans = nr_channels;
	od->nr_vchans = nr_requests;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	platform_set_drvdata(pdev, od);
	spin_lock_init(&od->lock);

	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);

	od->dma.dev = &pdev->dev;
	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
	od->dma.device_tx_status = owl_dma_tx_status;
	od->dma.device_issue_pending = owl_dma_issue_pending;
	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
	od->dma.device_config = owl_dma_config;
	od->dma.device_pause = owl_dma_pause;
	od->dma.device_resume = owl_dma_resume;
	od->dma.device_terminate_all = owl_dma_terminate_all;
	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.directions = BIT(DMA_MEM_TO_MEM);
	od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	INIT_LIST_HEAD(&od->dma.channels);

	od->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(od->clk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		return PTR_ERR(od->clk);
	}

	/*
	 * Even though the DMA controller is capable of generating 4
	 * IRQs for the DMA priority feature, we only use 1 IRQ for
	 * simplicity.
	 */
	od->irq = platform_get_irq(pdev, 0);
	if (od->irq < 0)
		return od->irq;

	ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
			       dev_name(&pdev->dev), od);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ\n");
		return ret;
	}

	/* Init physical channel */
	od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
				  sizeof(struct owl_dma_pchan), GFP_KERNEL);
	if (!od->pchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_pchans; i++) {
		struct owl_dma_pchan *pchan = &od->pchans[i];

		pchan->id = i;
		pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
	}

	/* Init virtual channel */
	od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
				  sizeof(struct owl_dma_vchan), GFP_KERNEL);
	if (!od->vchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_vchans; i++) {
		struct owl_dma_vchan *vchan = &od->vchans[i];

		vchan->vc.desc_free = owl_dma_desc_free;
		vchan_init(&vchan->vc, &od->dma);
	}

	/* Create a pool of consistent memory blocks for hardware descriptors */
	od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
				       sizeof(struct owl_dma_lli),
				       __alignof__(struct owl_dma_lli),
				       0);
	if (!od->lli_pool) {
		dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
		return -ENOMEM;
	}

	clk_prepare_enable(od->clk);

	ret = dma_async_device_register(&od->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_pool_free;
	}

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 owl_dma_of_xlate, od);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&od->dma);
err_pool_free:
	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);

	return ret;
}

static void owl_dma_remove(struct platform_device *pdev)
{
	struct owl_dma *od = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&od->dma);

	/* Mask all interrupts for this execution environment */
	dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(od->dma.dev, od->irq, od);

	owl_dma_free(od);

	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);
}

static struct platform_driver owl_dma_driver = {
	.probe	= owl_dma_probe,
	.remove_new = owl_dma_remove,
	.driver = {
		.name = "dma-owl",
		.of_match_table = of_match_ptr(owl_dma_match),
	},
};

static int owl_dma_init(void)
{
	return platform_driver_register(&owl_dma_driver);
}
subsys_initcall(owl_dma_init);

static void __exit owl_dma_exit(void)
{
	platform_driver_unregister(&owl_dma_driver);
}
module_exit(owl_dma_exit);

MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
MODULE_LICENSE("GPL");