1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * DMA driver for Nvidia's Tegra20 APB DMA controller. |
4 | * |
5 | * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. |
6 | */ |
7 | |
8 | #include <linux/bitops.h> |
9 | #include <linux/clk.h> |
10 | #include <linux/delay.h> |
11 | #include <linux/dmaengine.h> |
12 | #include <linux/dma-mapping.h> |
13 | #include <linux/err.h> |
14 | #include <linux/init.h> |
15 | #include <linux/interrupt.h> |
16 | #include <linux/io.h> |
17 | #include <linux/mm.h> |
18 | #include <linux/module.h> |
19 | #include <linux/of.h> |
20 | #include <linux/of_dma.h> |
21 | #include <linux/platform_device.h> |
22 | #include <linux/pm.h> |
23 | #include <linux/pm_runtime.h> |
24 | #include <linux/reset.h> |
25 | #include <linux/slab.h> |
26 | #include <linux/wait.h> |
27 | |
28 | #include "dmaengine.h" |
29 | |
30 | #define CREATE_TRACE_POINTS |
31 | #include <trace/events/tegra_apb_dma.h> |
32 | |
33 | #define TEGRA_APBDMA_GENERAL 0x0 |
34 | #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) |
35 | |
36 | #define TEGRA_APBDMA_CONTROL 0x010 |
37 | #define TEGRA_APBDMA_IRQ_MASK 0x01c |
38 | #define TEGRA_APBDMA_IRQ_MASK_SET 0x020 |
39 | |
40 | /* CSR register */ |
41 | #define TEGRA_APBDMA_CHAN_CSR 0x00 |
42 | #define TEGRA_APBDMA_CSR_ENB BIT(31) |
43 | #define TEGRA_APBDMA_CSR_IE_EOC BIT(30) |
44 | #define TEGRA_APBDMA_CSR_HOLD BIT(29) |
45 | #define TEGRA_APBDMA_CSR_DIR BIT(28) |
46 | #define TEGRA_APBDMA_CSR_ONCE BIT(27) |
47 | #define TEGRA_APBDMA_CSR_FLOW BIT(21) |
48 | #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 |
49 | #define TEGRA_APBDMA_CSR_REQ_SEL_MASK 0x1F |
50 | #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC |
51 | |
52 | /* STATUS register */ |
53 | #define TEGRA_APBDMA_CHAN_STATUS 0x004 |
54 | #define TEGRA_APBDMA_STATUS_BUSY BIT(31) |
55 | #define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30) |
56 | #define TEGRA_APBDMA_STATUS_HALT BIT(29) |
57 | #define TEGRA_APBDMA_STATUS_PING_PONG BIT(28) |
58 | #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 |
59 | #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC |
60 | |
61 | #define TEGRA_APBDMA_CHAN_CSRE 0x00C |
62 | #define TEGRA_APBDMA_CHAN_CSRE_PAUSE BIT(31) |
63 | |
64 | /* AHB memory address */ |
65 | #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 |
66 | |
67 | /* AHB sequence register */ |
68 | #define TEGRA_APBDMA_CHAN_AHBSEQ 0x14 |
69 | #define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31) |
70 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28) |
71 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28) |
72 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28) |
73 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28) |
74 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28) |
75 | #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27) |
76 | #define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24) |
77 | #define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24) |
78 | #define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24) |
79 | #define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19) |
80 | #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16 |
81 | #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0 |
82 | |
83 | /* APB address */ |
84 | #define TEGRA_APBDMA_CHAN_APBPTR 0x018 |
85 | |
86 | /* APB sequence register */ |
87 | #define TEGRA_APBDMA_CHAN_APBSEQ 0x01c |
88 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28) |
89 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28) |
90 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28) |
91 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28) |
92 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28) |
93 | #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) |
94 | #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) |
95 | |
96 | /* Tegra148 specific registers */ |
97 | #define TEGRA_APBDMA_CHAN_WCOUNT 0x20 |
98 | |
99 | #define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24 |
100 | |
101 | /* |
102 | * If any burst is in flight and DMA paused then this is the time to complete |
103 | * on-flight burst and update DMA status register. |
104 | */ |
105 | #define TEGRA_APBDMA_BURST_COMPLETE_TIME 20 |
106 | |
107 | /* Channel base address offset from APBDMA base address */ |
108 | #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 |
109 | |
110 | #define TEGRA_APBDMA_SLAVE_ID_INVALID (TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1) |
111 | |
112 | struct tegra_dma; |
113 | |
114 | /* |
115 | * tegra_dma_chip_data Tegra chip specific DMA data |
116 | * @nr_channels: Number of channels available in the controller. |
117 | * @channel_reg_size: Channel register size/stride. |
118 | * @max_dma_count: Maximum DMA transfer count supported by DMA controller. |
119 | * @support_channel_pause: Support channel wise pause of dma. |
120 | * @support_separate_wcount_reg: Support separate word count register. |
121 | */ |
122 | struct tegra_dma_chip_data { |
123 | unsigned int nr_channels; |
124 | unsigned int channel_reg_size; |
125 | unsigned int max_dma_count; |
126 | bool support_channel_pause; |
127 | bool support_separate_wcount_reg; |
128 | }; |
129 | |
130 | /* DMA channel registers */ |
131 | struct tegra_dma_channel_regs { |
132 | u32 csr; |
133 | u32 ahb_ptr; |
134 | u32 apb_ptr; |
135 | u32 ahb_seq; |
136 | u32 apb_seq; |
137 | u32 wcount; |
138 | }; |
139 | |
140 | /* |
141 | * tegra_dma_sg_req: DMA request details to configure hardware. This |
142 | * contains the details for one transfer to configure DMA hw. |
143 | * The client's request for data transfer can be broken into multiple |
144 | * sub-transfer as per requester details and hw support. |
145 | * This sub transfer get added in the list of transfer and point to Tegra |
146 | * DMA descriptor which manages the transfer details. |
147 | */ |
148 | struct tegra_dma_sg_req { |
149 | struct tegra_dma_channel_regs ch_regs; |
150 | unsigned int req_len; |
151 | bool configured; |
152 | bool last_sg; |
153 | struct list_head node; |
154 | struct tegra_dma_desc *dma_desc; |
155 | unsigned int words_xferred; |
156 | }; |
157 | |
158 | /* |
159 | * tegra_dma_desc: Tegra DMA descriptors which manages the client requests. |
160 | * This descriptor keep track of transfer status, callbacks and request |
161 | * counts etc. |
162 | */ |
163 | struct tegra_dma_desc { |
164 | struct dma_async_tx_descriptor txd; |
165 | unsigned int bytes_requested; |
166 | unsigned int bytes_transferred; |
167 | enum dma_status dma_status; |
168 | struct list_head node; |
169 | struct list_head tx_list; |
170 | struct list_head cb_node; |
171 | unsigned int cb_count; |
172 | }; |
173 | |
174 | struct tegra_dma_channel; |
175 | |
176 | typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, |
177 | bool to_terminate); |
178 | |
179 | /* tegra_dma_channel: Channel specific information */ |
180 | struct tegra_dma_channel { |
181 | struct dma_chan dma_chan; |
182 | char name[12]; |
183 | bool config_init; |
184 | unsigned int id; |
185 | void __iomem *chan_addr; |
186 | spinlock_t lock; |
187 | bool busy; |
188 | struct tegra_dma *tdma; |
189 | bool cyclic; |
190 | |
191 | /* Different lists for managing the requests */ |
192 | struct list_head free_sg_req; |
193 | struct list_head pending_sg_req; |
194 | struct list_head free_dma_desc; |
195 | struct list_head cb_desc; |
196 | |
197 | /* ISR handler and tasklet for bottom half of isr handling */ |
198 | dma_isr_handler isr_handler; |
199 | struct tasklet_struct tasklet; |
200 | |
201 | /* Channel-slave specific configuration */ |
202 | unsigned int slave_id; |
203 | struct dma_slave_config dma_sconfig; |
204 | struct tegra_dma_channel_regs channel_reg; |
205 | |
206 | struct wait_queue_head wq; |
207 | }; |
208 | |
209 | /* tegra_dma: Tegra DMA specific information */ |
210 | struct tegra_dma { |
211 | struct dma_device dma_dev; |
212 | struct device *dev; |
213 | struct clk *dma_clk; |
214 | struct reset_control *rst; |
215 | spinlock_t global_lock; |
216 | void __iomem *base_addr; |
217 | const struct tegra_dma_chip_data *chip_data; |
218 | |
219 | /* |
220 | * Counter for managing global pausing of the DMA controller. |
221 | * Only applicable for devices that don't support individual |
222 | * channel pausing. |
223 | */ |
224 | u32 global_pause_count; |
225 | |
226 | /* Last member of the structure */ |
227 | struct tegra_dma_channel channels[]; |
228 | }; |
229 | |
230 | static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) |
231 | { |
	writel(val, tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
244 | } |
245 | |
246 | static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) |
247 | { |
248 | return container_of(dc, struct tegra_dma_channel, dma_chan); |
249 | } |
250 | |
251 | static inline struct tegra_dma_desc * |
252 | txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td) |
253 | { |
254 | return container_of(td, struct tegra_dma_desc, txd); |
255 | } |
256 | |
257 | static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) |
258 | { |
259 | return &tdc->dma_chan.dev->device; |
260 | } |
261 | |
262 | static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx); |
263 | |
264 | /* Get DMA desc from free list, if not there then allocate it. */ |
265 | static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc) |
266 | { |
267 | struct tegra_dma_desc *dma_desc; |
268 | unsigned long flags; |
269 | |
270 | spin_lock_irqsave(&tdc->lock, flags); |
271 | |
	/* Do not reuse a descriptor that is still waiting for an ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
292 | |
293 | return dma_desc; |
294 | } |
295 | |
296 | static void tegra_dma_desc_put(struct tegra_dma_channel *tdc, |
297 | struct tegra_dma_desc *dma_desc) |
298 | { |
299 | unsigned long flags; |
300 | |
301 | spin_lock_irqsave(&tdc->lock, flags); |
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
306 | } |
307 | |
308 | static struct tegra_dma_sg_req * |
309 | tegra_dma_sg_req_get(struct tegra_dma_channel *tdc) |
310 | { |
311 | struct tegra_dma_sg_req *sg_req; |
312 | unsigned long flags; |
313 | |
314 | spin_lock_irqsave(&tdc->lock, flags); |
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
					  node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);
325 | |
326 | return sg_req; |
327 | } |
328 | |
329 | static int tegra_dma_slave_config(struct dma_chan *dc, |
330 | struct dma_slave_config *sconfig) |
331 | { |
332 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
333 | |
	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
336 | return -EBUSY; |
337 | } |
338 | |
339 | memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); |
340 | tdc->config_init = true; |
341 | |
342 | return 0; |
343 | } |
344 | |
345 | static void tegra_dma_global_pause(struct tegra_dma_channel *tdc, |
346 | bool wait_for_burst_complete) |
347 | { |
348 | struct tegra_dma *tdma = tdc->tdma; |
349 | |
	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
361 | } |
362 | |
363 | static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) |
364 | { |
365 | struct tegra_dma *tdma = tdc->tdma; |
366 | |
	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
378 | } |
379 | |
380 | static void tegra_dma_pause(struct tegra_dma_channel *tdc, |
381 | bool wait_for_burst_complete) |
382 | { |
383 | struct tegra_dma *tdma = tdc->tdma; |
384 | |
385 | if (tdma->chip_data->support_channel_pause) { |
386 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, |
387 | TEGRA_APBDMA_CHAN_CSRE_PAUSE); |
388 | if (wait_for_burst_complete) |
389 | udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); |
390 | } else { |
391 | tegra_dma_global_pause(tdc, wait_for_burst_complete); |
392 | } |
393 | } |
394 | |
395 | static void tegra_dma_resume(struct tegra_dma_channel *tdc) |
396 | { |
397 | struct tegra_dma *tdma = tdc->tdma; |
398 | |
399 | if (tdma->chip_data->support_channel_pause) |
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
401 | else |
402 | tegra_dma_global_resume(tdc); |
403 | } |
404 | |
405 | static void tegra_dma_stop(struct tegra_dma_channel *tdc) |
406 | { |
407 | u32 csr, status; |
408 | |
409 | /* Disable interrupts */ |
410 | csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); |
411 | csr &= ~TEGRA_APBDMA_CSR_IE_EOC; |
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
423 | } |
424 | tdc->busy = false; |
425 | } |
426 | |
427 | static void tegra_dma_start(struct tegra_dma_channel *tdc, |
428 | struct tegra_dma_sg_req *sg_req) |
429 | { |
430 | struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs; |
431 | |
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
443 | } |
444 | |
445 | static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, |
446 | struct tegra_dma_sg_req *nsg_req) |
447 | { |
448 | unsigned long status; |
449 | |
450 | /* |
451 | * The DMA controller reloads the new configuration for next transfer |
452 | * after last burst of current transfer completes. |
453 | * If there is no IEC status then this makes sure that last burst |
454 | * has not be completed. There may be case that last burst is on |
455 | * flight and so it can complete but because DMA is paused, it |
456 | * will not generates interrupt as well as not reload the new |
457 | * configuration. |
458 | * If there is already IEC status then interrupt handler need to |
459 | * load new configuration. |
460 | */ |
461 | tegra_dma_pause(tdc, wait_for_burst_complete: false); |
462 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); |
463 | |
464 | /* |
465 | * If interrupt is pending then do nothing as the ISR will handle |
466 | * the programing for new request. |
467 | */ |
468 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { |
469 | dev_err(tdc2dev(tdc), |
470 | "Skipping new configuration as interrupt is pending\n" ); |
471 | tegra_dma_resume(tdc); |
472 | return; |
473 | } |
474 | |
475 | /* Safe to program new configuration */ |
476 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, val: nsg_req->ch_regs.apb_ptr); |
477 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, val: nsg_req->ch_regs.ahb_ptr); |
478 | if (tdc->tdma->chip_data->support_separate_wcount_reg) |
479 | tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, |
480 | val: nsg_req->ch_regs.wcount); |
481 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, |
482 | val: nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); |
483 | nsg_req->configured = true; |
484 | nsg_req->words_xferred = 0; |
485 | |
486 | tegra_dma_resume(tdc); |
487 | } |
488 | |
489 | static void tdc_start_head_req(struct tegra_dma_channel *tdc) |
490 | { |
491 | struct tegra_dma_sg_req *sg_req; |
492 | |
493 | sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node); |
494 | tegra_dma_start(tdc, sg_req); |
495 | sg_req->configured = true; |
496 | sg_req->words_xferred = 0; |
497 | tdc->busy = true; |
498 | } |
499 | |
500 | static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc) |
501 | { |
502 | struct tegra_dma_sg_req *hsgreq, *hnsgreq; |
503 | |
504 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); |
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
					   node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
509 | } |
510 | } |
511 | |
512 | static inline unsigned int |
513 | get_current_xferred_count(struct tegra_dma_channel *tdc, |
514 | struct tegra_dma_sg_req *sg_req, |
515 | unsigned long status) |
516 | { |
517 | return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4; |
518 | } |
519 | |
520 | static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) |
521 | { |
522 | struct tegra_dma_desc *dma_desc; |
523 | struct tegra_dma_sg_req *sgreq; |
524 | |
	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
538 | dma_desc->cb_count++; |
539 | } |
540 | } |
541 | tdc->isr_handler = NULL; |
542 | } |
543 | |
544 | static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, |
545 | bool to_terminate) |
546 | { |
547 | struct tegra_dma_sg_req *hsgreq; |
548 | |
549 | /* |
550 | * Check that head req on list should be in flight. |
551 | * If it is not in flight then abort transfer as |
552 | * looping of transfer can not continue. |
553 | */ |
554 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); |
555 | if (!hsgreq->configured) { |
556 | tegra_dma_stop(tdc); |
557 | pm_runtime_put(dev: tdc->tdma->dev); |
558 | dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n" ); |
559 | tegra_dma_abort_all(tdc); |
560 | return false; |
561 | } |
562 | |
563 | /* Configure next request */ |
564 | if (!to_terminate) |
565 | tdc_configure_next_head_desc(tdc); |
566 | |
567 | return true; |
568 | } |
569 | |
570 | static void handle_once_dma_done(struct tegra_dma_channel *tdc, |
571 | bool to_terminate) |
572 | { |
573 | struct tegra_dma_desc *dma_desc; |
574 | struct tegra_dma_sg_req *sgreq; |
575 | |
576 | tdc->busy = false; |
577 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); |
578 | dma_desc = sgreq->dma_desc; |
579 | dma_desc->bytes_transferred += sgreq->req_len; |
580 | |
	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start the DMA if it is going to be terminated */
	if (to_terminate)
		return;

	if (list_empty(&tdc->pending_sg_req)) {
		pm_runtime_put(tdc->tdma->dev);
598 | return; |
599 | } |
600 | |
601 | tdc_start_head_req(tdc); |
602 | } |
603 | |
604 | static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, |
605 | bool to_terminate) |
606 | { |
607 | struct tegra_dma_desc *dma_desc; |
608 | struct tegra_dma_sg_req *sgreq; |
609 | bool st; |
610 | |
611 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); |
612 | dma_desc = sgreq->dma_desc; |
	/* If we DMA for long enough, the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* The callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	sgreq->words_xferred = 0;

	/* If not the last req then put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
628 | sgreq->configured = false; |
629 | st = handle_continuous_head_request(tdc, to_terminate); |
630 | if (!st) |
631 | dma_desc->dma_status = DMA_ERROR; |
632 | } |
633 | } |
634 | |
635 | static void tegra_dma_tasklet(struct tasklet_struct *t) |
636 | { |
637 | struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet); |
638 | struct dmaengine_desc_callback cb; |
639 | struct tegra_dma_desc *dma_desc; |
640 | unsigned int cb_count; |
641 | unsigned long flags; |
642 | |
643 | spin_lock_irqsave(&tdc->lock, flags); |
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
659 | } |
660 | |
661 | static irqreturn_t tegra_dma_isr(int irq, void *dev_id) |
662 | { |
663 | struct tegra_dma_channel *tdc = dev_id; |
664 | u32 status; |
665 | |
	spin_lock(&tdc->lock);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		wake_up_all(&tdc->wq);
		spin_unlock(&tdc->lock);
		return IRQ_HANDLED;
	}

	spin_unlock(&tdc->lock);
	dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
		 status);
682 | |
683 | return IRQ_NONE; |
684 | } |
685 | |
686 | static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd) |
687 | { |
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
698 | |
699 | return cookie; |
700 | } |
701 | |
702 | static void tegra_dma_issue_pending(struct dma_chan *dc) |
703 | { |
704 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
705 | unsigned long flags; |
706 | int err; |
707 | |
708 | spin_lock_irqsave(&tdc->lock, flags); |
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		err = pm_runtime_resume_and_get(tdc->tdma->dev);
		if (err < 0) {
			dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
			goto end;
		}

		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time to allow the DMA to be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
734 | } |
735 | |
736 | static int tegra_dma_terminate_all(struct dma_chan *dc) |
737 | { |
738 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
739 | struct tegra_dma_desc *dma_desc; |
740 | struct tegra_dma_sg_req *sgreq; |
741 | unsigned long flags; |
742 | u32 status, wcount; |
743 | bool was_busy; |
744 | |
745 | spin_lock_irqsave(&tdc->lock, flags); |
746 | |
747 | if (!tdc->busy) |
748 | goto skip_dma_stop; |
749 | |
750 | /* Pause DMA before checking the queue status */ |
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		sgreq->dma_desc->bytes_transferred +=
			get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

	pm_runtime_put(tdc->tdma->dev);
	wake_up_all(&tdc->wq);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
788 | |
789 | return 0; |
790 | } |
791 | |
792 | static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc) |
793 | { |
794 | unsigned long flags; |
795 | u32 status; |
796 | |
797 | spin_lock_irqsave(&tdc->lock, flags); |
798 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); |
	spin_unlock_irqrestore(&tdc->lock, flags);
800 | |
801 | return !(status & TEGRA_APBDMA_STATUS_ISE_EOC); |
802 | } |
803 | |
804 | static void tegra_dma_synchronize(struct dma_chan *dc) |
805 | { |
806 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
807 | int err; |
808 | |
	err = pm_runtime_resume_and_get(tdc->tdma->dev);
	if (err < 0) {
		dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
		return;
	}

	/*
	 * The CPU handling the interrupt could be busy in an
	 * uninterruptible state; in that case a sibling CPU should wait
	 * until the interrupt is handled.
	 */
	wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));

	tasklet_kill(&tdc->tasklet);

	pm_runtime_put(tdc->tdma->dev);
825 | } |
826 | |
827 | static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc, |
828 | struct tegra_dma_sg_req *sg_req) |
829 | { |
830 | u32 status, wcount = 0; |
831 | |
	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
833 | return 0; |
834 | |
835 | if (tdc->tdma->chip_data->support_separate_wcount_reg) |
836 | wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER); |
837 | |
838 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); |
839 | |
840 | if (!tdc->tdma->chip_data->support_separate_wcount_reg) |
841 | wcount = status; |
842 | |
843 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) |
844 | return sg_req->req_len; |
845 | |
	wcount = get_current_xferred_count(tdc, sg_req, wcount);
847 | |
848 | if (!wcount) { |
849 | /* |
850 | * If wcount wasn't ever polled for this SG before, then |
851 | * simply assume that transfer hasn't started yet. |
852 | * |
853 | * Otherwise it's the end of the transfer. |
854 | * |
855 | * The alternative would be to poll the status register |
856 | * until EOC bit is set or wcount goes UP. That's so |
857 | * because EOC bit is getting set only after the last |
858 | * burst's completion and counter is less than the actual |
859 | * transfer size by 4 bytes. The counter value wraps around |
860 | * in a cyclic mode before EOC is set(!), so we can't easily |
861 | * distinguish start of transfer from its end. |
862 | */ |
863 | if (sg_req->words_xferred) |
864 | wcount = sg_req->req_len - 4; |
865 | |
866 | } else if (wcount < sg_req->words_xferred) { |
867 | /* |
868 | * This case will never happen for a non-cyclic transfer. |
869 | * |
870 | * For a cyclic transfer, although it is possible for the |
871 | * next transfer to have already started (resetting the word |
872 | * count), this case should still not happen because we should |
873 | * have detected that the EOC bit is set and hence the transfer |
874 | * was completed. |
875 | */ |
876 | WARN_ON_ONCE(1); |
877 | |
878 | wcount = sg_req->req_len - 4; |
879 | } else { |
880 | sg_req->words_xferred = wcount; |
881 | } |
882 | |
883 | return wcount; |
884 | } |
885 | |
886 | static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, |
887 | dma_cookie_t cookie, |
888 | struct dma_tx_state *txstate) |
889 | { |
890 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
891 | struct tegra_dma_desc *dma_desc; |
892 | struct tegra_dma_sg_req *sg_req; |
893 | enum dma_status ret; |
894 | unsigned long flags; |
895 | unsigned int residual; |
896 | unsigned int bytes = 0; |
897 | |
	ret = dma_cookie_status(dc, cookie, txstate);
899 | if (ret == DMA_COMPLETE) |
900 | return ret; |
901 | |
902 | spin_lock_irqsave(&tdc->lock, flags); |
903 | |
904 | /* Check on wait_ack desc status */ |
905 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { |
906 | if (dma_desc->txd.cookie == cookie) { |
907 | ret = dma_desc->dma_status; |
908 | goto found; |
909 | } |
910 | } |
911 | |
912 | /* Check in pending list */ |
913 | list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { |
914 | dma_desc = sg_req->dma_desc; |
915 | if (dma_desc->txd.cookie == cookie) { |
916 | bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req); |
917 | ret = dma_desc->dma_status; |
918 | goto found; |
919 | } |
920 | } |
921 | |
922 | dev_dbg(tdc2dev(tdc), "cookie %d not found\n" , cookie); |
923 | dma_desc = NULL; |
924 | |
925 | found: |
926 | if (dma_desc && txstate) { |
927 | residual = dma_desc->bytes_requested - |
928 | ((dma_desc->bytes_transferred + bytes) % |
929 | dma_desc->bytes_requested); |
		dma_set_residue(txstate, residual);
	}

	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
	spin_unlock_irqrestore(&tdc->lock, flags);
935 | |
936 | return ret; |
937 | } |
938 | |
939 | static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc, |
940 | enum dma_slave_buswidth slave_bw) |
941 | { |
942 | switch (slave_bw) { |
943 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
944 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8; |
945 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
946 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16; |
947 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
948 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; |
949 | case DMA_SLAVE_BUSWIDTH_8_BYTES: |
950 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64; |
951 | default: |
		dev_warn(tdc2dev(tdc),
			 "slave bw is not supported, using 32bits\n");
954 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; |
955 | } |
956 | } |
957 | |
958 | static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc, |
959 | u32 burst_size, |
960 | enum dma_slave_buswidth slave_bw, |
961 | u32 len) |
962 | { |
963 | unsigned int burst_byte, burst_ahb_width; |
964 | |
965 | /* |
966 | * burst_size from client is in terms of the bus_width. |
967 | * convert them into AHB memory width which is 4 byte. |
968 | */ |
969 | burst_byte = burst_size * slave_bw; |
970 | burst_ahb_width = burst_byte / 4; |
971 | |
972 | /* If burst size is 0 then calculate the burst size based on length */ |
973 | if (!burst_ahb_width) { |
974 | if (len & 0xF) |
975 | return TEGRA_APBDMA_AHBSEQ_BURST_1; |
976 | else if ((len >> 4) & 0x1) |
977 | return TEGRA_APBDMA_AHBSEQ_BURST_4; |
978 | else |
979 | return TEGRA_APBDMA_AHBSEQ_BURST_8; |
980 | } |
981 | if (burst_ahb_width < 4) |
982 | return TEGRA_APBDMA_AHBSEQ_BURST_1; |
983 | else if (burst_ahb_width < 8) |
984 | return TEGRA_APBDMA_AHBSEQ_BURST_4; |
985 | else |
986 | return TEGRA_APBDMA_AHBSEQ_BURST_8; |
987 | } |
988 | |
989 | static int get_transfer_param(struct tegra_dma_channel *tdc, |
990 | enum dma_transfer_direction direction, |
991 | u32 *apb_addr, |
992 | u32 *apb_seq, |
993 | u32 *csr, |
994 | unsigned int *burst_size, |
995 | enum dma_slave_buswidth *slave_bw) |
996 | { |
997 | switch (direction) { |
998 | case DMA_MEM_TO_DEV: |
999 | *apb_addr = tdc->dma_sconfig.dst_addr; |
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
1016 | break; |
1017 | } |
1018 | |
1019 | return -EINVAL; |
1020 | } |
1021 | |
1022 | static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc, |
1023 | struct tegra_dma_channel_regs *ch_regs, |
1024 | u32 len) |
1025 | { |
1026 | u32 len_field = (len - 4) & 0xFFFC; |
1027 | |
1028 | if (tdc->tdma->chip_data->support_separate_wcount_reg) |
1029 | ch_regs->wcount = len_field; |
1030 | else |
1031 | ch_regs->csr |= len_field; |
1032 | } |
1033 | |
1034 | static struct dma_async_tx_descriptor * |
1035 | tegra_dma_prep_slave_sg(struct dma_chan *dc, |
1036 | struct scatterlist *sgl, |
1037 | unsigned int sg_len, |
1038 | enum dma_transfer_direction direction, |
1039 | unsigned long flags, |
1040 | void *context) |
1041 | { |
1042 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
1043 | struct tegra_dma_sg_req *sg_req = NULL; |
1044 | u32 csr, ahb_seq, apb_ptr, apb_seq; |
1045 | enum dma_slave_buswidth slave_bw; |
1046 | struct tegra_dma_desc *dma_desc; |
1047 | struct list_head req_list; |
1048 | struct scatterlist *sg; |
1049 | unsigned int burst_size; |
1050 | unsigned int i; |
1051 | |
1052 | if (!tdc->config_init) { |
1053 | dev_err(tdc2dev(tdc), "DMA channel is not configured\n" ); |
1054 | return NULL; |
1055 | } |
1056 | if (sg_len < 1) { |
1057 | dev_err(tdc2dev(tdc), "Invalid segment length %d\n" , sg_len); |
1058 | return NULL; |
1059 | } |
1060 | |
1061 | if (get_transfer_param(tdc, direction, apb_addr: &apb_ptr, apb_seq: &apb_seq, csr: &csr, |
1062 | burst_size: &burst_size, slave_bw: &slave_bw) < 0) |
1063 | return NULL; |
1064 | |
1065 | INIT_LIST_HEAD(list: &req_list); |
1066 | |
1067 | ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; |
1068 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << |
1069 | TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; |
1070 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; |
1071 | |
1072 | csr |= TEGRA_APBDMA_CSR_ONCE; |
1073 | |
1074 | if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) { |
1075 | csr |= TEGRA_APBDMA_CSR_FLOW; |
1076 | csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; |
1077 | } |
1078 | |
1079 | if (flags & DMA_PREP_INTERRUPT) { |
1080 | csr |= TEGRA_APBDMA_CSR_IE_EOC; |
1081 | } else { |
1082 | WARN_ON_ONCE(1); |
1083 | return NULL; |
1084 | } |
1085 | |
1086 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; |
1087 | |
1088 | dma_desc = tegra_dma_desc_get(tdc); |
1089 | if (!dma_desc) { |
1090 | dev_err(tdc2dev(tdc), "DMA descriptors not available\n" ); |
1091 | return NULL; |
1092 | } |
1093 | INIT_LIST_HEAD(list: &dma_desc->tx_list); |
1094 | INIT_LIST_HEAD(list: &dma_desc->cb_node); |
1095 | dma_desc->cb_count = 0; |
1096 | dma_desc->bytes_requested = 0; |
1097 | dma_desc->bytes_transferred = 0; |
1098 | dma_desc->dma_status = DMA_IN_PROGRESS; |
1099 | |
1100 | /* Make transfer requests */ |
1101 | for_each_sg(sgl, sg, sg_len, i) { |
1102 | u32 len, mem; |
1103 | |
1104 | mem = sg_dma_address(sg); |
1105 | len = sg_dma_len(sg); |
1106 | |
1107 | if ((len & 3) || (mem & 3) || |
1108 | len > tdc->tdma->chip_data->max_dma_count) { |
1109 | dev_err(tdc2dev(tdc), |
1110 | "DMA length/memory address is not supported\n" ); |
1111 | tegra_dma_desc_put(tdc, dma_desc); |
1112 | return NULL; |
1113 | } |
1114 | |
1115 | sg_req = tegra_dma_sg_req_get(tdc); |
1116 | if (!sg_req) { |
1117 | dev_err(tdc2dev(tdc), "DMA sg-req not available\n" ); |
1118 | tegra_dma_desc_put(tdc, dma_desc); |
1119 | return NULL; |
1120 | } |
1121 | |
1122 | ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); |
1123 | dma_desc->bytes_requested += len; |
1124 | |
1125 | sg_req->ch_regs.apb_ptr = apb_ptr; |
1126 | sg_req->ch_regs.ahb_ptr = mem; |
1127 | sg_req->ch_regs.csr = csr; |
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
1137 | } |
1138 | sg_req->last_sg = true; |
1139 | if (flags & DMA_CTRL_ACK) |
1140 | dma_desc->txd.flags = DMA_CTRL_ACK; |
1141 | |
1142 | /* |
1143 | * Make sure that mode should not be conflicting with currently |
1144 | * configured mode. |
1145 | */ |
1146 | if (!tdc->isr_handler) { |
1147 | tdc->isr_handler = handle_once_dma_done; |
1148 | tdc->cyclic = false; |
1149 | } else { |
1150 | if (tdc->cyclic) { |
1151 | dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n" ); |
1152 | tegra_dma_desc_put(tdc, dma_desc); |
1153 | return NULL; |
1154 | } |
1155 | } |
1156 | |
1157 | return &dma_desc->txd; |
1158 | } |
1159 | |
1160 | static struct dma_async_tx_descriptor * |
1161 | tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, |
1162 | size_t buf_len, |
1163 | size_t period_len, |
1164 | enum dma_transfer_direction direction, |
1165 | unsigned long flags) |
1166 | { |
1167 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
1168 | struct tegra_dma_sg_req *sg_req = NULL; |
1169 | u32 csr, ahb_seq, apb_ptr, apb_seq; |
1170 | enum dma_slave_buswidth slave_bw; |
1171 | struct tegra_dma_desc *dma_desc; |
1172 | dma_addr_t mem = buf_addr; |
1173 | unsigned int burst_size; |
1174 | size_t len, remain_len; |
1175 | |
1176 | if (!buf_len || !period_len) { |
1177 | dev_err(tdc2dev(tdc), "Invalid buffer/period len\n" ); |
1178 | return NULL; |
1179 | } |
1180 | |
1181 | if (!tdc->config_init) { |
1182 | dev_err(tdc2dev(tdc), "DMA slave is not configured\n" ); |
1183 | return NULL; |
1184 | } |
1185 | |
1186 | /* |
1187 | * We allow to take more number of requests till DMA is |
1188 | * not started. The driver will loop over all requests. |
1189 | * Once DMA is started then new requests can be queued only after |
1190 | * terminating the DMA. |
1191 | */ |
1192 | if (tdc->busy) { |
1193 | dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n" ); |
1194 | return NULL; |
1195 | } |
1196 | |
1197 | /* |
1198 | * We only support cycle transfer when buf_len is multiple of |
1199 | * period_len. |
1200 | */ |
1201 | if (buf_len % period_len) { |
1202 | dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n" ); |
1203 | return NULL; |
1204 | } |
1205 | |
1206 | len = period_len; |
1207 | if ((len & 3) || (buf_addr & 3) || |
1208 | len > tdc->tdma->chip_data->max_dma_count) { |
1209 | dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n" ); |
1210 | return NULL; |
1211 | } |
1212 | |
	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
1215 | return NULL; |
1216 | |
1217 | ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; |
1218 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << |
1219 | TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; |
1220 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; |
1221 | |
1222 | if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) { |
1223 | csr |= TEGRA_APBDMA_CSR_FLOW; |
1224 | csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; |
1225 | } |
1226 | |
1227 | if (flags & DMA_PREP_INTERRUPT) { |
1228 | csr |= TEGRA_APBDMA_CSR_IE_EOC; |
1229 | } else { |
1230 | WARN_ON_ONCE(1); |
1231 | return NULL; |
1232 | } |
1233 | |
1234 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; |
1235 | |
1236 | dma_desc = tegra_dma_desc_get(tdc); |
1237 | if (!dma_desc) { |
1238 | dev_err(tdc2dev(tdc), "not enough descriptors available\n" ); |
1239 | return NULL; |
1240 | } |
1241 | |
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
1244 | dma_desc->cb_count = 0; |
1245 | |
1246 | dma_desc->bytes_transferred = 0; |
1247 | dma_desc->bytes_requested = buf_len; |
1248 | remain_len = buf_len; |
1249 | |
1250 | /* Split transfer equal to period size */ |
1251 | while (remain_len) { |
1252 | sg_req = tegra_dma_sg_req_get(tdc); |
1253 | if (!sg_req) { |
1254 | dev_err(tdc2dev(tdc), "DMA sg-req not available\n" ); |
1255 | tegra_dma_desc_put(tdc, dma_desc); |
1256 | return NULL; |
1257 | } |
1258 | |
1259 | ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); |
1260 | sg_req->ch_regs.apb_ptr = apb_ptr; |
1261 | sg_req->ch_regs.ahb_ptr = mem; |
1262 | sg_req->ch_regs.csr = csr; |
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1264 | sg_req->ch_regs.apb_seq = apb_seq; |
1265 | sg_req->ch_regs.ahb_seq = ahb_seq; |
1266 | sg_req->configured = false; |
1267 | sg_req->last_sg = false; |
1268 | sg_req->dma_desc = dma_desc; |
1269 | sg_req->req_len = len; |
1270 | |
		list_add_tail(&sg_req->node, &dma_desc->tx_list);
1272 | remain_len -= len; |
1273 | mem += len; |
1274 | } |
1275 | sg_req->last_sg = true; |
1276 | if (flags & DMA_CTRL_ACK) |
1277 | dma_desc->txd.flags = DMA_CTRL_ACK; |
1278 | |
1279 | /* |
1280 | * Make sure that mode should not be conflicting with currently |
1281 | * configured mode. |
1282 | */ |
1283 | if (!tdc->isr_handler) { |
1284 | tdc->isr_handler = handle_cont_sngl_cycle_dma_done; |
1285 | tdc->cyclic = true; |
1286 | } else { |
1287 | if (!tdc->cyclic) { |
1288 | dev_err(tdc2dev(tdc), "DMA configuration conflict\n" ); |
1289 | tegra_dma_desc_put(tdc, dma_desc); |
1290 | return NULL; |
1291 | } |
1292 | } |
1293 | |
1294 | return &dma_desc->txd; |
1295 | } |
1296 | |
1297 | static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) |
1298 | { |
1299 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
1300 | |
	dma_cookie_init(&tdc->dma_chan);
1302 | |
1303 | return 0; |
1304 | } |
1305 | |
1306 | static void tegra_dma_free_chan_resources(struct dma_chan *dc) |
1307 | { |
1308 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
1309 | struct tegra_dma_desc *dma_desc; |
1310 | struct tegra_dma_sg_req *sg_req; |
1311 | struct list_head dma_desc_list; |
1312 | struct list_head sg_req_list; |
1313 | |
	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	tasklet_kill(&tdc->tasklet);

	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
					    node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
1340 | } |
1341 | |
1342 | tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID; |
1343 | } |
1344 | |
1345 | static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, |
1346 | struct of_dma *ofdma) |
1347 | { |
1348 | struct tegra_dma *tdma = ofdma->of_dma_data; |
1349 | struct tegra_dma_channel *tdc; |
1350 | struct dma_chan *chan; |
1351 | |
1352 | if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) { |
1353 | dev_err(tdma->dev, "Invalid slave id: %d\n" , dma_spec->args[0]); |
1354 | return NULL; |
1355 | } |
1356 | |
1357 | chan = dma_get_any_slave_channel(device: &tdma->dma_dev); |
1358 | if (!chan) |
1359 | return NULL; |
1360 | |
1361 | tdc = to_tegra_dma_chan(dc: chan); |
1362 | tdc->slave_id = dma_spec->args[0]; |
1363 | |
1364 | return chan; |
1365 | } |
1366 | |
1367 | /* Tegra20 specific DMA controller information */ |
1368 | static const struct tegra_dma_chip_data tegra20_dma_chip_data = { |
1369 | .nr_channels = 16, |
1370 | .channel_reg_size = 0x20, |
1371 | .max_dma_count = 1024UL * 64, |
1372 | .support_channel_pause = false, |
1373 | .support_separate_wcount_reg = false, |
1374 | }; |
1375 | |
1376 | /* Tegra30 specific DMA controller information */ |
1377 | static const struct tegra_dma_chip_data tegra30_dma_chip_data = { |
1378 | .nr_channels = 32, |
1379 | .channel_reg_size = 0x20, |
1380 | .max_dma_count = 1024UL * 64, |
1381 | .support_channel_pause = false, |
1382 | .support_separate_wcount_reg = false, |
1383 | }; |
1384 | |
1385 | /* Tegra114 specific DMA controller information */ |
1386 | static const struct tegra_dma_chip_data tegra114_dma_chip_data = { |
1387 | .nr_channels = 32, |
1388 | .channel_reg_size = 0x20, |
1389 | .max_dma_count = 1024UL * 64, |
1390 | .support_channel_pause = true, |
1391 | .support_separate_wcount_reg = false, |
1392 | }; |
1393 | |
1394 | /* Tegra148 specific DMA controller information */ |
1395 | static const struct tegra_dma_chip_data tegra148_dma_chip_data = { |
1396 | .nr_channels = 32, |
1397 | .channel_reg_size = 0x40, |
1398 | .max_dma_count = 1024UL * 64, |
1399 | .support_channel_pause = true, |
1400 | .support_separate_wcount_reg = true, |
1401 | }; |
1402 | |
1403 | static int tegra_dma_init_hw(struct tegra_dma *tdma) |
1404 | { |
1405 | int err; |
1406 | |
	err = reset_control_assert(tdma->rst);
	if (err) {
		dev_err(tdma->dev, "failed to assert reset: %d\n", err);
		return err;
	}

	err = clk_enable(tdma->dma_clk);
	if (err) {
		dev_err(tdma->dev, "failed to enable clk: %d\n", err);
		return err;
	}

	/* reset DMA controller */
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);

	clk_disable(tdma->dma_clk);
1429 | |
1430 | return 0; |
1431 | } |
1432 | |
1433 | static int tegra_dma_probe(struct platform_device *pdev) |
1434 | { |
1435 | const struct tegra_dma_chip_data *cdata; |
1436 | struct tegra_dma *tdma; |
1437 | unsigned int i; |
1438 | size_t size; |
1439 | int ret; |
1440 | |
	cdata = of_device_get_match_data(&pdev->dev);
	size = struct_size(tdma, channels, cdata->nr_channels);

	tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1445 | if (!tdma) |
1446 | return -ENOMEM; |
1447 | |
1448 | tdma->dev = &pdev->dev; |
1449 | tdma->chip_data = cdata; |
	platform_set_drvdata(pdev, tdma);

	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}
1466 | } |
1467 | |
1468 | spin_lock_init(&tdma->global_lock); |
1469 | |
	ret = clk_prepare(tdma->dma_clk);
1471 | if (ret) |
1472 | return ret; |
1473 | |
1474 | ret = tegra_dma_init_hw(tdma); |
1475 | if (ret) |
1476 | goto err_clk_unprepare; |
1477 | |
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
1482 | for (i = 0; i < cdata->nr_channels; i++) { |
1483 | struct tegra_dma_channel *tdc = &tdma->channels[i]; |
1484 | int irq; |
1485 | |
1486 | tdc->chan_addr = tdma->base_addr + |
1487 | TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + |
1488 | (i * cdata->channel_reg_size); |
1489 | |
1490 | irq = platform_get_irq(pdev, i); |
1491 | if (irq < 0) { |
1492 | ret = irq; |
1493 | goto err_pm_disable; |
1494 | } |
1495 | |
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
				       tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
1503 | goto err_pm_disable; |
1504 | } |
1505 | |
1506 | tdc->dma_chan.device = &tdma->dma_dev; |
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
			      &tdma->dma_dev.channels);
1510 | tdc->tdma = tdma; |
1511 | tdc->id = i; |
1512 | tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID; |
1513 | |
		tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
1515 | spin_lock_init(&tdc->lock); |
1516 | init_waitqueue_head(&tdc->wq); |
1517 | |
		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
1522 | } |
1523 | |
1524 | dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); |
1525 | dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); |
1526 | dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); |
1527 | |
1528 | tdma->global_pause_count = 0; |
1529 | tdma->dma_dev.dev = &pdev->dev; |
1530 | tdma->dma_dev.device_alloc_chan_resources = |
1531 | tegra_dma_alloc_chan_resources; |
1532 | tdma->dma_dev.device_free_chan_resources = |
1533 | tegra_dma_free_chan_resources; |
1534 | tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; |
1535 | tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; |
1536 | tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
1537 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | |
1538 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | |
1539 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); |
1540 | tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
1541 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | |
1542 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | |
1543 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); |
1544 | tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
1545 | tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1546 | tdma->dma_dev.device_config = tegra_dma_slave_config; |
1547 | tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; |
1548 | tdma->dma_dev.device_synchronize = tegra_dma_synchronize; |
1549 | tdma->dma_dev.device_tx_status = tegra_dma_tx_status; |
1550 | tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; |
1551 | |
	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_pm_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
1564 | goto err_unregister_dma_dev; |
1565 | } |
1566 | |
1567 | dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n" , |
1568 | cdata->nr_channels); |
1569 | |
1570 | return 0; |
1571 | |
1572 | err_unregister_dma_dev: |
	dma_async_device_unregister(&tdma->dma_dev);

err_pm_disable:
	pm_runtime_disable(&pdev->dev);

err_clk_unprepare:
	clk_unprepare(tdma->dma_clk);
1580 | |
1581 | return ret; |
1582 | } |
1583 | |
1584 | static void tegra_dma_remove(struct platform_device *pdev) |
1585 | { |
1586 | struct tegra_dma *tdma = platform_get_drvdata(pdev); |
1587 | |
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);
	pm_runtime_disable(&pdev->dev);
	clk_unprepare(tdma->dma_clk);
1592 | } |
1593 | |
1594 | static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev) |
1595 | { |
1596 | struct tegra_dma *tdma = dev_get_drvdata(dev); |
1597 | |
	clk_disable(tdma->dma_clk);
1599 | |
1600 | return 0; |
1601 | } |
1602 | |
1603 | static int __maybe_unused tegra_dma_runtime_resume(struct device *dev) |
1604 | { |
1605 | struct tegra_dma *tdma = dev_get_drvdata(dev); |
1606 | |
	return clk_enable(tdma->dma_clk);
1608 | } |
1609 | |
1610 | static int __maybe_unused tegra_dma_dev_suspend(struct device *dev) |
1611 | { |
1612 | struct tegra_dma *tdma = dev_get_drvdata(dev); |
1613 | unsigned long flags; |
1614 | unsigned int i; |
1615 | bool busy; |
1616 | |
1617 | for (i = 0; i < tdma->chip_data->nr_channels; i++) { |
1618 | struct tegra_dma_channel *tdc = &tdma->channels[i]; |
1619 | |
		tasklet_kill(&tdc->tasklet);

		spin_lock_irqsave(&tdc->lock, flags);
		busy = tdc->busy;
		spin_unlock_irqrestore(&tdc->lock, flags);

		if (busy) {
			dev_err(tdma->dev, "channel %u busy\n", i);
1628 | return -EBUSY; |
1629 | } |
1630 | } |
1631 | |
1632 | return pm_runtime_force_suspend(dev); |
1633 | } |
1634 | |
1635 | static int __maybe_unused tegra_dma_dev_resume(struct device *dev) |
1636 | { |
1637 | struct tegra_dma *tdma = dev_get_drvdata(dev); |
1638 | int err; |
1639 | |
1640 | err = tegra_dma_init_hw(tdma); |
1641 | if (err) |
1642 | return err; |
1643 | |
1644 | return pm_runtime_force_resume(dev); |
1645 | } |
1646 | |
1647 | static const struct dev_pm_ops tegra_dma_dev_pm_ops = { |
1648 | SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume, |
1649 | NULL) |
1650 | SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume) |
1651 | }; |
1652 | |
1653 | static const struct of_device_id tegra_dma_of_match[] = { |
1654 | { |
1655 | .compatible = "nvidia,tegra148-apbdma" , |
1656 | .data = &tegra148_dma_chip_data, |
1657 | }, { |
1658 | .compatible = "nvidia,tegra114-apbdma" , |
1659 | .data = &tegra114_dma_chip_data, |
1660 | }, { |
1661 | .compatible = "nvidia,tegra30-apbdma" , |
1662 | .data = &tegra30_dma_chip_data, |
1663 | }, { |
1664 | .compatible = "nvidia,tegra20-apbdma" , |
1665 | .data = &tegra20_dma_chip_data, |
1666 | }, { |
1667 | }, |
1668 | }; |
1669 | MODULE_DEVICE_TABLE(of, tegra_dma_of_match); |
1670 | |
1671 | static struct platform_driver tegra_dmac_driver = { |
1672 | .driver = { |
1673 | .name = "tegra-apbdma" , |
1674 | .pm = &tegra_dma_dev_pm_ops, |
1675 | .of_match_table = tegra_dma_of_match, |
1676 | }, |
1677 | .probe = tegra_dma_probe, |
1678 | .remove_new = tegra_dma_remove, |
1679 | }; |
1680 | |
1681 | module_platform_driver(tegra_dmac_driver); |
1682 | |
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");
1686 | |