// SPDX-License-Identifier: GPL-2.0-only
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"
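
/*
 * Usage sketch (illustrative only, not lifted from an in-tree client):
 * this controller is exposed through the dmaengine slave API.  A
 * peripheral driver typically requests a channel and points
 * chan->private at a struct pch_dma_slave describing its FIFO
 * registers and access width before preparing transfers:
 *
 *	struct pch_dma_slave param = {
 *		.tx_reg	= fifo_base + TX_OFF,	// TX_OFF/RX_OFF are
 *		.rx_reg	= fifo_base + RX_OFF,	// device-specific placeholders
 *		.width	= PCH_DMA_WIDTH_1_BYTE,
 *	};
 *	chan = dma_request_channel(mask, filter_fn, &param);
 *	chan->private = &param;
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *
 * See pd_prep_slave_sg() below for how these fields are consumed.
 */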

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3
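
/*
 * Descriptor encoding notes (as used by this driver): bits 13:12 of the
 * size word select the access width and the transfer count lives in the
 * low bits, so the per-width maxima above are what one descriptor can
 * carry.  The low two bits of the next word are control flags
 * (follow/end, with or without IRQ); the remaining bits hold the bus
 * address of the next descriptor, which is why descriptors come from a
 * 4-byte-aligned DMA pool.
 */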

#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};
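
/*
 * Descriptor lifecycle: descriptors start on free_list, move to
 * active_list (whose head is in flight) or queue on submit, and return
 * to free_list once their chain completes.  All three lists are
 * protected by the channel spinlock.
 */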

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct dma_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

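/*
 * Interrupt enable bits live in CTL2: one bit per channel, with
 * channels 0-7 at bits 0-7 and channels 8-11 starting at bit 16, hence
 * the "chan_id + 8" offset below.
 */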
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

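/*
 * CTL0 (channels 0-7) and CTL3 (channels 8-11) hold a 4-bit field per
 * channel: two mode bits plus a direction bit at offset
 * DMA_CTL0_DIR_SHIFT_BITS.  The mask arithmetic below rewrites only
 * this channel's direction bit while preserving every other channel's
 * mode bits.
 */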
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

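/*
 * Program this channel's two mode bits (disable / scatter-gather /
 * one-shot) in CTL0 or CTL3 without disturbing its direction bit or
 * the other channels' fields.
 */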
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	return sts == DMA_STATUS_IDLE;
}

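/*
 * Start a transfer: a single descriptor is programmed directly into
 * the channel's register quartet and run in one-shot mode; a chained
 * descriptor list is handed to the hardware by its bus address in NEXT
 * and run in scatter-gather mode.
 */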
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(txd, &cb);
	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

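/*
 * tx_submit hook: if nothing is active the descriptor is started
 * immediately, otherwise it is parked on the queue and picked up by
 * pdc_advance_work()/pdc_complete_all() when the hardware goes idle.
 */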
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);

	spin_lock(&pd_chan->lock);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}

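/*
 * Allocate one descriptor from the device's DMA pool.  txd.phys is the
 * bus address the hardware chases through NEXT pointers, and
 * DMA_CTRL_ACK is pre-set so freshly allocated descriptors are
 * immediately reusable by pdc_desc_get().
 */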
static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = dma_pool_zalloc(pd->pool, flags, &addr);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		dma_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

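/*
 * Build a slave scatter/gather transaction.  Each sg entry becomes one
 * hardware descriptor: the device FIFO address, the memory address, and
 * a size word combining the count with the width bits.  Intermediate
 * descriptors are marked FOLLOW (the next descriptor's bus address is
 * OR'ed in), and the final one is marked END, with or without an IRQ
 * depending on DMA_PREP_INTERRUPT.
 */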
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(struct tasklet_struct *t)
{
	struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

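/*
 * Shared interrupt handler.  STS0 carries IRQ and error flags for
 * channels 0-7, STS2 for channels 8-11.  Handling is deferred to the
 * per-channel tasklet; the status registers appear to be
 * write-to-clear, so the raw values read at entry are written back to
 * acknowledge only what was observed.
 */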
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

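/*
 * Suspend/resume support: snapshot the global control registers and
 * each channel's descriptor registers into struct pch_dma so they can
 * be replayed verbatim on resume.
 */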
static void __maybe_unused pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void __maybe_unused pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int __maybe_unused pch_dma_suspend(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_save_regs(pd);

	return 0;
}

static int __maybe_unused pch_dma_resume(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}

static int pch_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);
	pd->dma.dev = &pdev->dev;

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	dma_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}


static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		dma_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4 }, /* SPI */
	{ 0, },
};

static SIMPLE_DEV_PM_OPS(pch_dma_pm_ops, pch_dma_suspend, pch_dma_resume);

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
	.driver.pm	= &pch_dma_pm_ops,
};

module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);