// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for NVIDIA Tegra GPC DMA controller.
 *
 * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <dt-bindings/memory/tegra186-mc.h>
#include "virt-dma.h"

/* CSR register */
#define TEGRA_GPCDMA_CHAN_CSR 0x00
#define TEGRA_GPCDMA_CSR_ENB BIT(31)
#define TEGRA_GPCDMA_CSR_IE_EOC BIT(30)
#define TEGRA_GPCDMA_CSR_ONCE BIT(27)

#define TEGRA_GPCDMA_CSR_FC_MODE GENMASK(25, 24)
#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO \
	FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO \
	FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO \
	FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO \
	FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)

#define TEGRA_GPCDMA_CSR_DMA GENMASK(23, 21)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC \
	FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC \
	FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC \
	FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC \
	FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM \
	FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT \
	FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)

#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK GENMASK(20, 16)
#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED \
	FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
#define TEGRA_GPCDMA_CSR_IRQ_MASK BIT(15)
#define TEGRA_GPCDMA_CSR_WEIGHT GENMASK(13, 10)

/* STATUS register */
#define TEGRA_GPCDMA_CHAN_STATUS 0x004
#define TEGRA_GPCDMA_STATUS_BUSY BIT(31)
#define TEGRA_GPCDMA_STATUS_ISE_EOC BIT(30)
#define TEGRA_GPCDMA_STATUS_PING_PONG BIT(28)
#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY BIT(27)
#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE BIT(26)
#define TEGRA_GPCDMA_STATUS_CHANNEL_RX BIT(25)
#define TEGRA_GPCDMA_STATUS_CHANNEL_TX BIT(24)
#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA BIT(23)
#define TEGRA_GPCDMA_STATUS_IRQ_STA BIT(21)
#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA BIT(20)

#define TEGRA_GPCDMA_CHAN_CSRE 0x008
#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE BIT(31)

/* Source address */
#define TEGRA_GPCDMA_CHAN_SRC_PTR 0x00C

/* Destination address */
#define TEGRA_GPCDMA_CHAN_DST_PTR 0x010

/* High address pointer */
#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR 0x014
#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR GENMASK(7, 0)
#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR GENMASK(23, 16)

/* MC sequence register */
#define TEGRA_GPCDMA_CHAN_MCSEQ 0x18
#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP BIT(31)
#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT GENMASK(30, 25)
#define TEGRA_GPCDMA_MCSEQ_BURST GENMASK(24, 23)
#define TEGRA_GPCDMA_MCSEQ_BURST_2 \
	FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
#define TEGRA_GPCDMA_MCSEQ_BURST_16 \
	FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
#define TEGRA_GPCDMA_MCSEQ_WRAP1 GENMASK(22, 20)
#define TEGRA_GPCDMA_MCSEQ_WRAP0 GENMASK(19, 17)
#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE 0

#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK GENMASK(13, 7)
#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK GENMASK(6, 0)

/* MMIO sequence register */
#define TEGRA_GPCDMA_CHAN_MMIOSEQ 0x01c
#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF BIT(31)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH GENMASK(30, 28)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8 \
	FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16 \
	FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32 \
	FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP BIT(27)
#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT 23
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN 2U
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX 32U
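/*
 * Encode a power-of-two burst size of @bs words into the MMIOSEQ burst
 * field: GENMASK(fls(bs) - 2, 0) evaluates to (bs - 1), e.g. 2 words ->
 * 0x1, 32 words -> 0x1f.
 */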
#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs) \
	(GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID GENMASK(22, 19)
#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD GENMASK(18, 16)
#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT GENMASK(8, 7)

/* Channel WCOUNT */
#define TEGRA_GPCDMA_CHAN_WCOUNT 0x20

/* Transfer count */
#define TEGRA_GPCDMA_CHAN_XFER_COUNT 0x24

/* DMA byte count status */
#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS 0x28

/* Error Status Register */
#define TEGRA_GPCDMA_CHAN_ERR_STATUS 0x30
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT 8
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF
#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) ( \
	((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \
	TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
#define TEGRA_DMA_BM_FIFO_FULL_ERR 0xF
#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR 0xE
#define TEGRA_DMA_PERIPH_ID_ERR 0xD
#define TEGRA_DMA_STREAM_ID_ERR 0xC
#define TEGRA_DMA_MC_SLAVE_ERR 0xB
#define TEGRA_DMA_MMIO_SLAVE_ERR 0xA

/* Fixed Pattern */
#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN 0x34

#define TEGRA_GPCDMA_CHAN_TZ 0x38
#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1 BIT(0)
#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1 BIT(1)

#define TEGRA_GPCDMA_CHAN_SPARE 0x3c
#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC BIT(16)

/*
 * If a burst is in flight when the DMA channel is paused, this is the
 * time needed for the in-flight burst to complete and the DMA status
 * register to update.
 */
#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 10
#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 5000 /* 5 msec */

/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET 0x10000

/* Default channel mask reserving channel0 */
#define TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK 0xfffffffe

struct tegra_dma;
struct tegra_dma_channel;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @hw_support_pause: DMA HW engine supports pausing of the channel.
 * @terminate: Callback to terminate the in-flight transfer on a channel.
 */
struct tegra_dma_chip_data {
	bool hw_support_pause;
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	int (*terminate)(struct tegra_dma_channel *tdc);
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	u32 csr;
	u32 src_ptr;
	u32 dst_ptr;
	u32 high_addr_ptr;
	u32 mc_seq;
	u32 mmio_seq;
	u32 wcount;
	u32 fixed_pattern;
};

/*
 * tegra_dma_sg_req: DMA request details to configure hardware. This
 * contains the details for one transfer to configure DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per requester details and hw support. These
 * sub-transfers are added as an array in the Tegra DMA desc which
 * manages the transfer details.
 */
struct tegra_dma_sg_req {
	unsigned int len;
	struct tegra_dma_channel_regs ch_regs;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which uses virt_dma_desc to
 * manage the client request and keep track of transfer status,
 * callbacks, request counts, etc.
 */
struct tegra_dma_desc {
	bool cyclic;
	unsigned int bytes_req;
	unsigned int bytes_xfer;
	unsigned int sg_idx;
	unsigned int sg_count;
	struct virt_dma_desc vd;
	struct tegra_dma_channel *tdc;
	struct tegra_dma_sg_req sg_req[] __counted_by(sg_count);
};

/*
 * tegra_dma_channel: Channel specific information
 */
struct tegra_dma_channel {
	bool config_init;
	char name[30];
	enum dma_transfer_direction sid_dir;
	int id;
	int irq;
	int slave_id;
	struct tegra_dma *tdma;
	struct virt_dma_chan vc;
	struct tegra_dma_desc *dma_desc;
	struct dma_slave_config dma_sconfig;
	unsigned int stream_id;
	unsigned long chan_base_offset;
};

/*
 * tegra_dma: Tegra DMA specific information
 */
struct tegra_dma {
	const struct tegra_dma_chip_data *chip_data;
	unsigned long sid_m2d_reserved;
	unsigned long sid_d2m_reserved;
	u32 chan_mask;
	void __iomem *base_addr;
	struct device *dev;
	struct dma_device dma_dev;
	struct reset_control *rst;
	struct tegra_dma_channel channels[];
};

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, vc.chan);
}

static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct tegra_dma_desc, vd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return tdc->vc.chan.device->dev;
}

static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
{
	dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
		tdc->id, tdc->name);
	dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR)
	);
	dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS)
	);
	dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
}

static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
				 enum dma_transfer_direction direction)
{
	struct tegra_dma *tdma = tdc->tdma;
	int sid = tdc->slave_id;

	if (!is_slave_direction(direction))
		return 0;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
			dev_err(tdma->dev, "slave id already in use\n");
			return -EINVAL;
		}
		break;
	case DMA_DEV_TO_MEM:
		if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
			dev_err(tdma->dev, "slave id already in use\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	tdc->sid_dir = direction;

	return 0;
}

static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;
	int sid = tdc->slave_id;

	switch (tdc->sid_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(sid, &tdma->sid_m2d_reserved);
		break;
	case DMA_DEV_TO_MEM:
		clear_bit(sid, &tdma->sid_d2m_reserved);
		break;
	default:
		break;
	}

	tdc->sid_dir = DMA_TRANS_NONE;
}

static void tegra_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_dma_desc, vd));
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;

	return 0;
}

static int tegra_dma_pause(struct tegra_dma_channel *tdc)
{
	int ret;
	u32 val;

	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);

	/* Wait until busy bit is de-asserted */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
			val,
			!(val & TEGRA_GPCDMA_STATUS_BUSY),
			TEGRA_GPCDMA_BURST_COMPLETE_TIME,
			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);

	if (ret) {
		dev_err(tdc2dev(tdc), "DMA pause timed out\n");
		tegra_dma_dump_chan_regs(tdc);
	}

	return ret;
}

static int tegra_dma_device_pause(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	int ret;

	if (!tdc->tdma->chip_data->hw_support_pause)
		return -ENOSYS;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	ret = tegra_dma_pause(tdc);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return ret;
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	u32 val;

	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
}

static int tegra_dma_device_resume(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	if (!tdc->tdma->chip_data->hw_support_pause)
		return -ENOSYS;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	tegra_dma_resume(tdc);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return 0;
}

static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
{
	/*
	 * Return 0 irrespective of PAUSE status.
	 * This is useful to recover channels that can exit out of flush
	 * state when the channel is disabled.
	 */
	tegra_dma_pause(tdc);
	return 0;
}

static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
	u32 csr, status;

	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);

	/* Disable interrupts */
	csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;

	/* Disable DMA */
	csr &= ~TEGRA_GPCDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
	}
}

static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_channel_regs *ch_regs;
	int ret;
	u32 val;

	dma_desc->sg_idx++;

	/* Reset the sg index for cyclic transfers */
	if (dma_desc->sg_idx == dma_desc->sg_count)
		dma_desc->sg_idx = 0;

	/* Configure next transfer immediately after DMA is busy */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
			val,
			(val & TEGRA_GPCDMA_STATUS_BUSY), 0,
			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret)
		return;

	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}

static void tegra_dma_start(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_channel_regs *ch_regs;
	struct virt_dma_desc *vdesc;

	if (!dma_desc) {
		vdesc = vchan_next_desc(&tdc->vc);
		if (!vdesc)
			return;

		dma_desc = vd_to_tegra_dma_desc(vdesc);
		list_del(&vdesc->node);
		dma_desc->tdc = tdc;
		tdc->dma_desc = dma_desc;

		tegra_dma_resume(tdc);
	}

	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
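	/* Clear CSR so stale control bits do not carry into this transfer */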
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}

static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
{
	vchan_cookie_complete(&tdc->dma_desc->vd);

	tegra_dma_sid_free(tdc);
	tdc->dma_desc = NULL;
}

static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
					unsigned int err_status)
{
	switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
	case TEGRA_DMA_BM_FIFO_FULL_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d bm fifo full\n", tdc->id);
		break;

	case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d peripheral fifo full\n", tdc->id);
		break;

	case TEGRA_DMA_PERIPH_ID_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d illegal peripheral id\n", tdc->id);
		break;

	case TEGRA_DMA_STREAM_ID_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d illegal stream id\n", tdc->id);
		break;

	case TEGRA_DMA_MC_SLAVE_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d mc slave error\n", tdc->id);
		break;

	case TEGRA_DMA_MMIO_SLAVE_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d mmio slave error\n", tdc->id);
		break;

	default:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d security violation %x\n", tdc->id,
			err_status);
	}
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_sg_req *sg_req;
	u32 status;

	/* Check channel error status register */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
	if (status) {
		tegra_dma_chan_decode_error(tdc, status);
		tegra_dma_dump_chan_regs(tdc);
		tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
	}

	spin_lock(&tdc->vc.lock);
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
		goto irq_done;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
		  TEGRA_GPCDMA_STATUS_ISE_EOC);

	if (!dma_desc)
		goto irq_done;

	sg_req = dma_desc->sg_req;
	dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;

	if (dma_desc->cyclic) {
		vchan_cyclic_callback(&dma_desc->vd);
		tegra_dma_configure_next_sg(tdc);
	} else {
		dma_desc->sg_idx++;
		if (dma_desc->sg_idx == dma_desc->sg_count)
			tegra_dma_xfer_complete(tdc);
		else
			tegra_dma_start(tdc);
	}

irq_done:
	spin_unlock(&tdc->vc.lock);
	return IRQ_HANDLED;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

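	/* Do nothing if a transfer is already in flight */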
	if (tdc->dma_desc)
		return;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	if (vchan_issue_pending(&tdc->vc))
		tegra_dma_start(tdc);

	/*
	 * For cyclic DMA transfers, program the second
	 * transfer parameters as soon as the first DMA
	 * transfer is started in order for the DMA
	 * controller to trigger the second transfer
	 * with the correct parameters.
	 */
	if (tdc->dma_desc && tdc->dma_desc->cyclic)
		tegra_dma_configure_next_sg(tdc);

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}

static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
{
	int ret;
	u32 status, csr;

	/*
	 * Change the client associated with the DMA channel
	 * to stop the DMA engine from starting any more bursts for
	 * the given client and wait for in-flight bursts to complete.
	 */
	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
	csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
	csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

	/* Wait for in-flight data transfers to finish */
	udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);

	/*
	 * If the TX/RX path is still active, wait until it becomes
	 * inactive.
	 */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
				tdc->chan_base_offset +
				TEGRA_GPCDMA_CHAN_STATUS,
				status,
				!(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
				TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
				5,
				TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret) {
		dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
		tegra_dma_dump_chan_regs(tdc);
	}

	return ret;
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);
	int err;

	spin_lock_irqsave(&tdc->vc.lock, flags);

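	/* Stop the in-flight descriptor via the chip-specific terminate hook */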
	if (tdc->dma_desc) {
		err = tdc->tdma->chip_data->terminate(tdc);
		if (err) {
			spin_unlock_irqrestore(&tdc->vc.lock, flags);
			return err;
		}

		vchan_terminate_vdesc(&tdc->dma_desc->vd);
		tegra_dma_disable(tdc);
		tdc->dma_desc = NULL;
	}

	tegra_dma_sid_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	vchan_dma_desc_free_list(&tdc->vc, &head);

	return 0;
}

static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
	unsigned int bytes_xfer, residual;
	u32 wcount = 0, status;

	wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);

	/*
	 * Set wcount = 0 if EOC bit is set. The transfer would have
	 * already completed and the CHAN_XFER_COUNT could have updated
	 * for the next transfer, specifically in case of cyclic transfers.
	 */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
		wcount = 0;

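	/*
	 * Bytes transferred so far: completed sg entries plus the current
	 * entry, minus the words the hardware still has outstanding
	 * (wcount is in 4-byte words). The modulo folds cyclic transfers
	 * back into the current period.
	 */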
	bytes_xfer = dma_desc->bytes_xfer +
		     sg_req[dma_desc->sg_idx].len - (wcount * 4);

	residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);

	return residual;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct virt_dma_desc *vd;
	unsigned int residual;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
		dma_desc = vd_to_tegra_dma_desc(vd);
		residual = dma_desc->bytes_req;
		dma_set_residue(txstate, residual);
	} else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
		residual = tegra_dma_get_residual(tdc);
		dma_set_residue(txstate, residual);
	} else {
		dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
	}
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
				enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
	default:
		dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
		return -EINVAL;
	}
}

static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
				   u32 burst_size, enum dma_slave_buswidth slave_bw,
				   int len)
{
	unsigned int burst_mmio_width, burst_byte;

	/*
	 * The burst_size from the client is in terms of the bus width;
	 * convert it into words.
	 * If burst_size is not specified by the client, use len to
	 * calculate the optimum burst size.
	 */
	burst_byte = burst_size ? burst_size * slave_bw : len;
	burst_mmio_width = burst_byte / 4;

	if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
		return 0;

	burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);

	return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
}

static int get_transfer_param(struct tegra_dma_channel *tdc,
			      enum dma_transfer_direction direction,
			      u32 *apb_addr,
			      u32 *mmio_seq,
			      u32 *csr,
			      unsigned int *burst_size,
			      enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
		return 0;
	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
		return 0;
	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
	}

	return -EINVAL;
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
			  size_t len, unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	u32 csr, mc_seq;

	if ((len & 3) || (dest & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc),
			"DMA length/memory address is not supported\n");
		return NULL;
	}

	/* Set DMA mode to fixed pattern */
	csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clear the rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program outstanding MC requests */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Set burst size */
	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = len;
	dma_desc->sg_count = 1;
	sg_req = dma_desc->sg_req;

	sg_req[0].ch_regs.src_ptr = 0;
	sg_req[0].ch_regs.dst_ptr = dest;
	sg_req[0].ch_regs.high_addr_ptr =
		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
	sg_req[0].ch_regs.fixed_pattern = value;
	/* Word count reg takes value as (N + 1) words */
	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
	sg_req[0].ch_regs.csr = csr;
	sg_req[0].ch_regs.mmio_seq = 0;
	sg_req[0].ch_regs.mc_seq = mc_seq;
	sg_req[0].len = len;

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	unsigned int max_dma_count;
	u32 csr, mc_seq;

	max_dma_count = tdc->tdma->chip_data->max_dma_count;
	if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc),
			"DMA length/memory address is not supported\n");
		return NULL;
	}

	/* Set DMA mode to memory to memory transfer */
	csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clear the rest */
	mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
		  (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

	/* Set the address wrapping */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program outstanding MC requests */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Set burst size */
	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = len;
	dma_desc->sg_count = 1;
	sg_req = dma_desc->sg_req;

	sg_req[0].ch_regs.src_ptr = src;
	sg_req[0].ch_regs.dst_ptr = dest;
	sg_req[0].ch_regs.high_addr_ptr =
		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
	sg_req[0].ch_regs.high_addr_ptr |=
		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
	/* Word count reg takes value as (N + 1) words */
	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
	sg_req[0].ch_regs.csr = csr;
	sg_req[0].ch_regs.mmio_seq = 0;
	sg_req[0].ch_regs.mc_seq = mc_seq;
	sg_req[0].len = len;

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	struct scatterlist *sg;
	u32 burst_size;
	unsigned int i;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = tegra_dma_sid_reserve(tdc, direction);
	if (ret)
		return NULL;

	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
				 &burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Program the slave id in requestor select */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clear the rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping on both MC and MMIO side */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

	/* Program 2 MC outstanding requests by default. */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);

	/* Setting MC burst size depending on MMIO burst size */
	if (burst_size == 64)
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
	else
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->sg_count = sg_len;
	sg_req = dma_desc->sg_req;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len;
		dma_addr_t mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) || len > max_dma_count) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			kfree(dma_desc);
			return NULL;
		}

		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_req += len;

		if (direction == DMA_MEM_TO_DEV) {
			sg_req[i].ch_regs.src_ptr = mem;
			sg_req[i].ch_regs.dst_ptr = apb_ptr;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
		} else if (direction == DMA_DEV_TO_MEM) {
			sg_req[i].ch_regs.src_ptr = apb_ptr;
			sg_req[i].ch_regs.dst_ptr = mem;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
		}

		/*
		 * Word count register takes input in words. Writing a value
		 * of N into word count register means a req of (N+1) words.
		 */
		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
		sg_req[i].ch_regs.csr = csr;
		sg_req[i].ch_regs.mmio_seq = mmio_seq;
		sg_req[i].ch_regs.mc_seq = mc_seq;
		sg_req[i].len = len;
	}

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
			  size_t period_len, enum dma_transfer_direction direction,
			  unsigned long flags)
{
	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
	unsigned int max_dma_count, len, period_count, i;
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	dma_addr_t mem = buf_addr;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	ret = tegra_dma_sid_reserve(tdc, direction);
	if (ret)
		return NULL;

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	max_dma_count = tdc->tdma->chip_data->max_dma_count;
	if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
				 &burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	/* Enable once or continuous mode */
	csr &= ~TEGRA_GPCDMA_CSR_ONCE;
	/* Program the slave id in requestor select */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;

	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clear the rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping on both MC and MMIO side */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program 2 MC outstanding requests by default. */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Setting MC burst size depending on MMIO burst size */
	if (burst_size == 64)
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
	else
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

	period_count = buf_len / period_len;
	dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
			   GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = buf_len;
	dma_desc->sg_count = period_count;
	sg_req = dma_desc->sg_req;

	/* Split transfer equal to period size */
	for (i = 0; i < period_count; i++) {
		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		if (direction == DMA_MEM_TO_DEV) {
			sg_req[i].ch_regs.src_ptr = mem;
			sg_req[i].ch_regs.dst_ptr = apb_ptr;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
		} else if (direction == DMA_DEV_TO_MEM) {
			sg_req[i].ch_regs.src_ptr = apb_ptr;
			sg_req[i].ch_regs.dst_ptr = mem;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
		}
		/*
		 * Word count register takes input in words. Writing a value
		 * of N into word count register means a req of (N+1) words.
		 */
		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
		sg_req[i].ch_regs.csr = csr;
		sg_req[i].ch_regs.mmio_seq = mmio_seq;
		sg_req[i].ch_regs.mc_seq = mc_seq;
		sg_req[i].len = len;

		mem += len;
	}

	dma_desc->cyclic = true;

	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);
	tdc->config_init = false;
	return 0;
}

static void tegra_dma_chan_synchronize(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	synchronize_irq(tdc->irq);
	vchan_synchronize(&tdc->vc);
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	synchronize_irq(tdc->irq);

	tasklet_kill(&tdc->vc.task);
	tdc->config_init = false;
	tdc->slave_id = -1;
	tdc->sid_dir = DMA_TRANS_NONE;
	free_irq(tdc->irq, tdc);

	vchan_free_chan_resources(&tdc->vc);
}

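/*
 * A client references a channel with a single DT specifier cell carrying
 * the requestor/slave ID, e.g. (illustrative bindings snippet):
 *
 *     dmas = <&gpcdma 24>;
 *     dma-names = "rx";
 */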
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct tegra_dma_channel *tdc;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = false,
	.terminate = tegra_dma_stop_client,
};

static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = true,
	.terminate = tegra_dma_pause,
};

static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = true,
	.terminate = tegra_dma_pause_noerr,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra186-gpcdma",
		.data = &tegra186_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra194-gpcdma",
		.data = &tegra194_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra234-gpcdma",
		.data = &tegra234_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

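/* Program the channel's memory-client stream ID into both MCSEQ stream-ID fields */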
static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
{
	unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);

	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);
	return 0;
}

static int tegra_dma_probe(struct platform_device *pdev)
{
	const struct tegra_dma_chip_data *cdata = NULL;
	struct iommu_fwspec *iommu_spec;
	unsigned int stream_id, i;
	struct tegra_dma *tdma;
	int ret;

	cdata = of_device_get_match_data(&pdev->dev);

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
	if (IS_ERR(tdma->rst)) {
		return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
				     "Missing controller reset\n");
	}
	reset_control_reset(tdma->rst);

	tdma->dma_dev.dev = &pdev->dev;

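	/* The low 16 bits of the first IOMMU specifier carry the stream ID */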
	iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
	if (!iommu_spec) {
		dev_err(&pdev->dev, "Missing iommu stream-id\n");
		return -EINVAL;
	}
	stream_id = iommu_spec->ids[0] & 0xffff;

	ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
				       &tdma->chan_mask);
	if (ret) {
		dev_warn(&pdev->dev,
			 "Missing dma-channel-mask property, using default channel mask %#x\n",
			 TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK);
		tdma->chan_mask = TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK;
	}

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		/* Check for channel mask */
		if (!(tdma->chan_mask & BIT(i)))
			continue;

		tdc->irq = platform_get_irq(pdev, i);
		if (tdc->irq < 0)
			return tdc->irq;

		tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET +
					i * cdata->channel_reg_size;
		snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = -1;

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_dma_desc_free;

		/* program stream-id for this channel */
		tegra_dma_program_sid(tdc, stream_id);
		tdc->stream_id = stream_id;
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	/*
	 * Only word aligned transfers are supported. Set the copy
	 * alignment shift.
	 */
	tdma->dma_dev.copy_align = 2;
	tdma->dma_dev.fill_align = 2;
	tdma->dma_dev.device_alloc_chan_resources =
		tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
		tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
	tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
	tdma->dma_dev.device_pause = tegra_dma_device_pause;
	tdma->dma_dev.device_resume = tegra_dma_device_resume;
	tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret,
			      "GPC DMA driver registration failed\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret,
			      "GPC DMA OF registration failed\n");

		dma_async_device_unregister(&tdma->dma_dev);
		return ret;
	}

	dev_info(&pdev->dev, "GPC DMA driver registered %lu channels\n",
		 hweight_long(tdma->chan_mask));

	return 0;
}

static void tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);
}

static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned int i;

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		if (!(tdma->chan_mask & BIT(i)))
			continue;

		if (tdc->dma_desc) {
			dev_err(tdma->dev, "channel %u busy\n", i);
			return -EBUSY;
		}
	}

	return 0;
}

static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned int i;

	reset_control_reset(tdma->rst);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		if (!(tdma->chan_mask & BIT(i)))
			continue;

		tegra_dma_program_sid(tdc, tdc->stream_id);
	}

	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};

static struct platform_driver tegra_dma_driver = {
	.driver = {
		.name = "tegra-gpcdma",
		.pm = &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe = tegra_dma_probe,
	.remove_new = tegra_dma_remove,
};

module_platform_driver(tegra_dma_driver);

MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>");
MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>");
MODULE_LICENSE("GPL");