1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Applied Micro X-Gene SoC DMA engine Driver |
4 | * |
5 | * Copyright (c) 2015, Applied Micro Circuits Corporation |
6 | * Authors: Rameshwar Prasad Sahu <rsahu@apm.com> |
7 | * Loc Ho <lho@apm.com> |
8 | * |
9 | * NOTE: PM support is currently not available. |
10 | */ |
11 | |
12 | #include <linux/acpi.h> |
13 | #include <linux/clk.h> |
14 | #include <linux/delay.h> |
15 | #include <linux/dma-mapping.h> |
16 | #include <linux/dmaengine.h> |
17 | #include <linux/dmapool.h> |
18 | #include <linux/interrupt.h> |
19 | #include <linux/io.h> |
20 | #include <linux/irq.h> |
21 | #include <linux/mod_devicetable.h> |
22 | #include <linux/module.h> |
23 | #include <linux/platform_device.h> |
24 | |
25 | #include "dmaengine.h" |
26 | |
/* X-Gene DMA ring csr registers and bit definitions */
28 | #define XGENE_DMA_RING_CONFIG 0x04 |
29 | #define XGENE_DMA_RING_ENABLE BIT(31) |
30 | #define XGENE_DMA_RING_ID 0x08 |
31 | #define XGENE_DMA_RING_ID_SETUP(v) ((v) | BIT(31)) |
32 | #define XGENE_DMA_RING_ID_BUF 0x0C |
33 | #define XGENE_DMA_RING_ID_BUF_SETUP(v) (((v) << 9) | BIT(21)) |
34 | #define XGENE_DMA_RING_THRESLD0_SET1 0x30 |
35 | #define XGENE_DMA_RING_THRESLD0_SET1_VAL 0X64 |
36 | #define XGENE_DMA_RING_THRESLD1_SET1 0x34 |
37 | #define XGENE_DMA_RING_THRESLD1_SET1_VAL 0xC8 |
38 | #define XGENE_DMA_RING_HYSTERESIS 0x68 |
39 | #define XGENE_DMA_RING_HYSTERESIS_VAL 0xFFFFFFFF |
40 | #define XGENE_DMA_RING_STATE 0x6C |
41 | #define XGENE_DMA_RING_STATE_WR_BASE 0x70 |
42 | #define XGENE_DMA_RING_NE_INT_MODE 0x017C |
43 | #define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \ |
44 | ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v))) |
45 | #define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \ |
46 | ((m) &= (~BIT(31 - (v)))) |
47 | #define XGENE_DMA_RING_CLKEN 0xC208 |
48 | #define XGENE_DMA_RING_SRST 0xC200 |
49 | #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 |
50 | #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 |
51 | #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF |
52 | #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) |
53 | #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) |
54 | #define XGENE_DMA_RING_CMD_OFFSET 0x2C |
55 | #define XGENE_DMA_RING_CMD_BASE_OFFSET(v) ((v) << 6) |
56 | #define XGENE_DMA_RING_COHERENT_SET(m) \ |
57 | (((u32 *)(m))[2] |= BIT(4)) |
58 | #define XGENE_DMA_RING_ADDRL_SET(m, v) \ |
59 | (((u32 *)(m))[2] |= (((v) >> 8) << 5)) |
60 | #define XGENE_DMA_RING_ADDRH_SET(m, v) \ |
61 | (((u32 *)(m))[3] |= ((v) >> 35)) |
62 | #define XGENE_DMA_RING_ACCEPTLERR_SET(m) \ |
63 | (((u32 *)(m))[3] |= BIT(19)) |
64 | #define XGENE_DMA_RING_SIZE_SET(m, v) \ |
65 | (((u32 *)(m))[3] |= ((v) << 23)) |
66 | #define XGENE_DMA_RING_RECOMBBUF_SET(m) \ |
67 | (((u32 *)(m))[3] |= BIT(27)) |
68 | #define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \ |
69 | (((u32 *)(m))[3] |= (0x7 << 28)) |
70 | #define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \ |
71 | (((u32 *)(m))[4] |= 0x3) |
72 | #define XGENE_DMA_RING_SELTHRSH_SET(m) \ |
73 | (((u32 *)(m))[4] |= BIT(3)) |
74 | #define XGENE_DMA_RING_TYPE_SET(m, v) \ |
75 | (((u32 *)(m))[4] |= ((v) << 19)) |
76 | |
77 | /* X-Gene DMA device csr registers and bit definitions */ |
78 | #define XGENE_DMA_IPBRR 0x0 |
79 | #define XGENE_DMA_DEV_ID_RD(v) ((v) & 0x00000FFF) |
80 | #define XGENE_DMA_BUS_ID_RD(v) (((v) >> 12) & 3) |
81 | #define XGENE_DMA_REV_NO_RD(v) (((v) >> 14) & 3) |
82 | #define XGENE_DMA_GCR 0x10 |
83 | #define XGENE_DMA_CH_SETUP(v) \ |
84 | ((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF) |
85 | #define XGENE_DMA_ENABLE(v) ((v) |= BIT(31)) |
86 | #define XGENE_DMA_DISABLE(v) ((v) &= ~BIT(31)) |
87 | #define XGENE_DMA_RAID6_CONT 0x14 |
88 | #define XGENE_DMA_RAID6_MULTI_CTRL(v) ((v) << 24) |
89 | #define XGENE_DMA_INT 0x70 |
90 | #define XGENE_DMA_INT_MASK 0x74 |
91 | #define XGENE_DMA_INT_ALL_MASK 0xFFFFFFFF |
92 | #define XGENE_DMA_INT_ALL_UNMASK 0x0 |
93 | #define XGENE_DMA_INT_MASK_SHIFT 0x14 |
94 | #define XGENE_DMA_RING_INT0_MASK 0x90A0 |
95 | #define XGENE_DMA_RING_INT1_MASK 0x90A8 |
96 | #define XGENE_DMA_RING_INT2_MASK 0x90B0 |
97 | #define XGENE_DMA_RING_INT3_MASK 0x90B8 |
98 | #define XGENE_DMA_RING_INT4_MASK 0x90C0 |
99 | #define XGENE_DMA_CFG_RING_WQ_ASSOC 0x90E0 |
100 | #define XGENE_DMA_ASSOC_RING_MNGR1 0xFFFFFFFF |
101 | #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 |
102 | #define XGENE_DMA_BLK_MEM_RDY 0xD074 |
103 | #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF |
104 | #define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000 |
105 | |
/* X-Gene SoC EFUSE csr register and bit definition */
107 | #define XGENE_SOC_JTAG1_SHADOW 0x18 |
108 | #define XGENE_DMA_PQ_DISABLE_MASK BIT(13) |
109 | |
110 | /* X-Gene DMA Descriptor format */ |
111 | #define XGENE_DMA_DESC_NV_BIT BIT_ULL(50) |
112 | #define XGENE_DMA_DESC_IN_BIT BIT_ULL(55) |
113 | #define XGENE_DMA_DESC_C_BIT BIT_ULL(63) |
114 | #define XGENE_DMA_DESC_DR_BIT BIT_ULL(61) |
115 | #define XGENE_DMA_DESC_ELERR_POS 46 |
116 | #define XGENE_DMA_DESC_RTYPE_POS 56 |
117 | #define XGENE_DMA_DESC_LERR_POS 60 |
118 | #define XGENE_DMA_DESC_BUFLEN_POS 48 |
119 | #define XGENE_DMA_DESC_HOENQ_NUM_POS 48 |
120 | #define XGENE_DMA_DESC_ELERR_RD(m) \ |
121 | (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3) |
122 | #define XGENE_DMA_DESC_LERR_RD(m) \ |
123 | (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7) |
124 | #define XGENE_DMA_DESC_STATUS(elerr, lerr) \ |
125 | (((elerr) << 4) | (lerr)) |
126 | |
127 | /* X-Gene DMA descriptor empty s/w signature */ |
128 | #define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL |
129 | |
130 | /* X-Gene DMA configurable parameters defines */ |
131 | #define XGENE_DMA_RING_NUM 512 |
132 | #define XGENE_DMA_BUFNUM 0x0 |
133 | #define XGENE_DMA_CPU_BUFNUM 0x18 |
134 | #define XGENE_DMA_RING_OWNER_DMA 0x03 |
135 | #define XGENE_DMA_RING_OWNER_CPU 0x0F |
136 | #define XGENE_DMA_RING_TYPE_REGULAR 0x01 |
137 | #define XGENE_DMA_RING_WQ_DESC_SIZE 32 /* 32 Bytes */ |
138 | #define XGENE_DMA_RING_NUM_CONFIG 5 |
139 | #define XGENE_DMA_MAX_CHANNEL 4 |
140 | #define XGENE_DMA_XOR_CHANNEL 0 |
141 | #define XGENE_DMA_PQ_CHANNEL 1 |
142 | #define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */ |
143 | #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */ |
144 | #define XGENE_DMA_MAX_XOR_SRC 5 |
145 | #define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0 |
146 | #define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL |
147 | |
148 | /* X-Gene DMA descriptor error codes */ |
149 | #define ERR_DESC_AXI 0x01 |
150 | #define ERR_BAD_DESC 0x02 |
151 | #define ERR_READ_DATA_AXI 0x03 |
152 | #define ERR_WRITE_DATA_AXI 0x04 |
153 | #define ERR_FBP_TIMEOUT 0x05 |
154 | #define ERR_ECC 0x06 |
155 | #define ERR_DIFF_SIZE 0x08 |
156 | #define ERR_SCT_GAT_LEN 0x09 |
157 | #define ERR_CRC_ERR 0x11 |
158 | #define ERR_CHKSUM 0x12 |
159 | #define ERR_DIF 0x13 |
160 | |
161 | /* X-Gene DMA error interrupt codes */ |
162 | #define ERR_DIF_SIZE_INT 0x0 |
163 | #define ERR_GS_ERR_INT 0x1 |
164 | #define ERR_FPB_TIMEO_INT 0x2 |
165 | #define ERR_WFIFO_OVF_INT 0x3 |
166 | #define ERR_RFIFO_OVF_INT 0x4 |
167 | #define ERR_WR_TIMEO_INT 0x5 |
168 | #define ERR_RD_TIMEO_INT 0x6 |
169 | #define ERR_WR_ERR_INT 0x7 |
170 | #define ERR_RD_ERR_INT 0x8 |
171 | #define ERR_BAD_DESC_INT 0x9 |
172 | #define ERR_DESC_DST_INT 0xA |
173 | #define ERR_DESC_SRC_INT 0xB |
174 | |
175 | /* X-Gene DMA flyby operation code */ |
176 | #define FLYBY_2SRC_XOR 0x80 |
177 | #define FLYBY_3SRC_XOR 0x90 |
178 | #define FLYBY_4SRC_XOR 0xA0 |
179 | #define FLYBY_5SRC_XOR 0xB0 |
180 | |
181 | /* X-Gene DMA SW descriptor flags */ |
182 | #define XGENE_DMA_FLAG_64B_DESC BIT(0) |
183 | |
184 | /* Define to dump X-Gene DMA descriptor */ |
185 | #define XGENE_DMA_DESC_DUMP(desc, m) \ |
186 | print_hex_dump(KERN_ERR, (m), \ |
187 | DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0) |
188 | |
189 | #define to_dma_desc_sw(tx) \ |
190 | container_of(tx, struct xgene_dma_desc_sw, tx) |
191 | #define to_dma_chan(dchan) \ |
192 | container_of(dchan, struct xgene_dma_chan, dma_chan) |
193 | |
194 | #define chan_dbg(chan, fmt, arg...) \ |
195 | dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) |
196 | #define chan_err(chan, fmt, arg...) \ |
197 | dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) |
198 | |
199 | struct xgene_dma_desc_hw { |
200 | __le64 m0; |
201 | __le64 m1; |
202 | __le64 m2; |
203 | __le64 m3; |
204 | }; |
205 | |
206 | enum xgene_dma_ring_cfgsize { |
207 | XGENE_DMA_RING_CFG_SIZE_512B, |
208 | XGENE_DMA_RING_CFG_SIZE_2KB, |
209 | XGENE_DMA_RING_CFG_SIZE_16KB, |
210 | XGENE_DMA_RING_CFG_SIZE_64KB, |
211 | XGENE_DMA_RING_CFG_SIZE_512KB, |
212 | XGENE_DMA_RING_CFG_SIZE_INVALID |
213 | }; |
214 | |
215 | struct xgene_dma_ring { |
216 | struct xgene_dma *pdma; |
217 | u8 buf_num; |
218 | u16 id; |
219 | u16 num; |
220 | u16 head; |
221 | u16 owner; |
222 | u16 slots; |
223 | u16 dst_ring_num; |
224 | u32 size; |
225 | void __iomem *cmd; |
226 | void __iomem *cmd_base; |
227 | dma_addr_t desc_paddr; |
228 | u32 state[XGENE_DMA_RING_NUM_CONFIG]; |
229 | enum xgene_dma_ring_cfgsize cfgsize; |
230 | union { |
231 | void *desc_vaddr; |
232 | struct xgene_dma_desc_hw *desc_hw; |
233 | }; |
234 | }; |
235 | |
236 | struct xgene_dma_desc_sw { |
237 | struct xgene_dma_desc_hw desc1; |
238 | struct xgene_dma_desc_hw desc2; |
239 | u32 flags; |
240 | struct list_head node; |
241 | struct list_head tx_list; |
242 | struct dma_async_tx_descriptor tx; |
243 | }; |
244 | |
245 | /** |
246 | * struct xgene_dma_chan - internal representation of an X-Gene DMA channel |
247 | * @dma_chan: dmaengine channel object member |
248 | * @pdma: X-Gene DMA device structure reference |
249 | * @dev: struct device reference for dma mapping api |
250 | * @id: raw id of this channel |
251 | * @rx_irq: channel IRQ |
252 | * @name: name of X-Gene DMA channel |
253 | * @lock: serializes enqueue/dequeue operations to the descriptor pool |
 * @pending: number of transaction requests pushed to the DMA controller for
 *	execution, but still waiting for completion
 * @max_outstanding: max number of outstanding requests we can push to channel
257 | * @ld_pending: descriptors which are queued to run, but have not yet been |
258 | * submitted to the hardware for execution |
 * @ld_running: descriptors which are currently being executed by the hardware
260 | * @ld_completed: descriptors which have finished execution by the hardware. |
261 | * These descriptors have already had their cleanup actions run. They |
262 | * are waiting for the ACK bit to be set by the async tx API. |
263 | * @desc_pool: descriptor pool for DMA operations |
 * @tasklet: bottom half where all completed descriptors are cleaned up
 * @tx_ring: transmit ring descriptor that we use to prepare actual
 *	descriptors for further execution
267 | * @rx_ring: receive ring descriptor that we use to get completed DMA |
268 | * descriptors during cleanup time |
269 | */ |
270 | struct xgene_dma_chan { |
271 | struct dma_chan dma_chan; |
272 | struct xgene_dma *pdma; |
273 | struct device *dev; |
274 | int id; |
275 | int rx_irq; |
276 | char name[10]; |
277 | spinlock_t lock; |
278 | int pending; |
279 | int max_outstanding; |
280 | struct list_head ld_pending; |
281 | struct list_head ld_running; |
282 | struct list_head ld_completed; |
283 | struct dma_pool *desc_pool; |
284 | struct tasklet_struct tasklet; |
285 | struct xgene_dma_ring tx_ring; |
286 | struct xgene_dma_ring rx_ring; |
287 | }; |
288 | |
289 | /** |
290 | * struct xgene_dma - internal representation of an X-Gene DMA device |
291 | * @dev: reference to this device's struct device |
292 | * @clk: reference to this device's clock |
293 | * @err_irq: DMA error irq number |
294 | * @ring_num: start id number for DMA ring |
295 | * @csr_dma: base for DMA register access |
296 | * @csr_ring: base for DMA ring register access |
297 | * @csr_ring_cmd: base for DMA ring command register access |
298 | * @csr_efuse: base for efuse register access |
299 | * @dma_dev: embedded struct dma_device |
300 | * @chan: reference to X-Gene DMA channels |
301 | */ |
302 | struct xgene_dma { |
303 | struct device *dev; |
304 | struct clk *clk; |
305 | int err_irq; |
306 | int ring_num; |
307 | void __iomem *csr_dma; |
308 | void __iomem *csr_ring; |
309 | void __iomem *csr_ring_cmd; |
310 | void __iomem *csr_efuse; |
311 | struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL]; |
312 | struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL]; |
313 | }; |
314 | |
315 | static const char * const xgene_dma_desc_err[] = { |
	[ERR_DESC_AXI] = "AXI error when reading src/dst link list",
	[ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
	[ERR_READ_DATA_AXI] = "AXI error when reading data",
	[ERR_WRITE_DATA_AXI] = "AXI error when writing data",
	[ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
	[ERR_ECC] = "ECC double bit error",
	[ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
	[ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
	[ERR_CRC_ERR] = "CRC error",
	[ERR_CHKSUM] = "Checksum error",
	[ERR_DIF] = "DIF error",
327 | }; |
328 | |
329 | static const char * const xgene_dma_err[] = { |
	[ERR_DIF_SIZE_INT] = "DIF size error",
	[ERR_GS_ERR_INT] = "Gather scatter not same size error",
	[ERR_FPB_TIMEO_INT] = "Free pool time out error",
	[ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
	[ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
	[ERR_WR_TIMEO_INT] = "Write time out error",
	[ERR_RD_TIMEO_INT] = "Read time out error",
	[ERR_WR_ERR_INT] = "HBF bus write error",
	[ERR_RD_ERR_INT] = "HBF bus read error",
	[ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
	[ERR_DESC_DST_INT] = "HFB reading dst link address error",
	[ERR_DESC_SRC_INT] = "HFB reading src link address error",
342 | }; |
343 | |
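/*
 * PQ (RAID6) support can be fused off in hardware; judging by the macro
 * names, bit 13 of the JTAG1 shadow efuse register reports whether it is
 * disabled.
 */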
344 | static bool is_pq_enabled(struct xgene_dma *pdma) |
345 | { |
346 | u32 val; |
347 | |
348 | val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW); |
349 | return !(val & XGENE_DMA_PQ_DISABLE_MASK); |
350 | } |
351 | |
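/*
 * Encode a buffer length into the descriptor BUFLEN field. Lengths below
 * the 16 KB hardware limit are stored directly; a full 16 KB chunk is
 * represented by the special length code 0x0.
 */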
352 | static u64 xgene_dma_encode_len(size_t len) |
353 | { |
354 | return (len < XGENE_DMA_MAX_BYTE_CNT) ? |
355 | ((u64)len << XGENE_DMA_DESC_BUFLEN_POS) : |
356 | XGENE_DMA_16K_BUFFER_LEN_CODE; |
357 | } |
358 | |
359 | static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) |
360 | { |
361 | static u8 flyby_type[] = { |
362 | FLYBY_2SRC_XOR, /* Dummy */ |
363 | FLYBY_2SRC_XOR, /* Dummy */ |
364 | FLYBY_2SRC_XOR, |
365 | FLYBY_3SRC_XOR, |
366 | FLYBY_4SRC_XOR, |
367 | FLYBY_5SRC_XOR |
368 | }; |
369 | |
370 | return flyby_type[src_cnt]; |
371 | } |
372 | |
373 | static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, |
374 | dma_addr_t *paddr) |
375 | { |
376 | size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ? |
377 | *len : XGENE_DMA_MAX_BYTE_CNT; |
378 | |
379 | *ext8 |= cpu_to_le64(*paddr); |
380 | *ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes)); |
381 | *len -= nbytes; |
382 | *paddr += nbytes; |
383 | } |
384 | |
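/*
 * Source buffers 2-5 live in the second (extension) 32 B descriptor.
 * The 64-bit words within each 16-byte half are used in swapped order
 * (m1 before m0, m3 before m2), presumably to match the hw layout.
 */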
385 | static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx) |
386 | { |
387 | switch (idx) { |
388 | case 0: |
389 | return &desc->m1; |
390 | case 1: |
391 | return &desc->m0; |
392 | case 2: |
393 | return &desc->m3; |
394 | case 3: |
395 | return &desc->m2; |
396 | default: |
		pr_err("Invalid dma descriptor index\n");
398 | } |
399 | |
400 | return NULL; |
401 | } |
402 | |
403 | static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc, |
404 | u16 dst_ring_num) |
405 | { |
406 | desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT); |
407 | desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA << |
408 | XGENE_DMA_DESC_RTYPE_POS); |
409 | desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT); |
410 | desc->m3 |= cpu_to_le64((u64)dst_ring_num << |
411 | XGENE_DMA_DESC_HOENQ_NUM_POS); |
412 | } |
413 | |
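/*
 * Build one XOR/PQ hw descriptor pair covering at most 16 KB per source.
 * This consumes up to 16 KB from *nbytes and advances the source and
 * destination addresses; callers loop until *nbytes reaches zero.
 */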
414 | static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, |
415 | struct xgene_dma_desc_sw *desc_sw, |
416 | dma_addr_t *dst, dma_addr_t *src, |
417 | u32 src_cnt, size_t *nbytes, |
418 | const u8 *scf) |
419 | { |
420 | struct xgene_dma_desc_hw *desc1, *desc2; |
421 | size_t len = *nbytes; |
422 | int i; |
423 | |
424 | desc1 = &desc_sw->desc1; |
425 | desc2 = &desc_sw->desc2; |
426 | |
427 | /* Initialize DMA descriptor */ |
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
429 | |
430 | /* Set destination address */ |
431 | desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); |
432 | desc1->m3 |= cpu_to_le64(*dst); |
433 | |
	/* We have multiple source addresses, so need to set NV bit */
435 | desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); |
436 | |
437 | /* Set flyby opcode */ |
438 | desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt)); |
439 | |
440 | /* Set 1st to 5th source addresses */ |
441 | for (i = 0; i < src_cnt; i++) { |
442 | len = *nbytes; |
		xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
					 xgene_dma_lookup_ext8(desc2, i - 1),
					 &len, &src[i]);
446 | desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8))); |
447 | } |
448 | |
449 | /* Update meta data */ |
450 | *nbytes = len; |
451 | *dst += XGENE_DMA_MAX_BYTE_CNT; |
452 | |
	/* We always need a 64B descriptor to perform xor or pq operations */
454 | desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; |
455 | } |
456 | |
457 | static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
458 | { |
459 | struct xgene_dma_desc_sw *desc; |
460 | struct xgene_dma_chan *chan; |
461 | dma_cookie_t cookie; |
462 | |
463 | if (unlikely(!tx)) |
464 | return -EINVAL; |
465 | |
466 | chan = to_dma_chan(tx->chan); |
467 | desc = to_dma_desc_sw(tx); |
468 | |
	spin_lock_bh(&chan->lock);

	cookie = dma_cookie_assign(tx);

	/* Add this transaction list onto the tail of the pending queue */
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);

	spin_unlock_bh(&chan->lock);
477 | |
478 | return cookie; |
479 | } |
480 | |
481 | static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan, |
482 | struct xgene_dma_desc_sw *desc) |
483 | { |
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
487 | } |
488 | |
489 | static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor( |
490 | struct xgene_dma_chan *chan) |
491 | { |
492 | struct xgene_dma_desc_sw *desc; |
493 | dma_addr_t phys; |
494 | |
	desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (!desc) {
		chan_err(chan, "Failed to allocate LDs\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	desc->tx.phys = phys;
	desc->tx.tx_submit = xgene_dma_tx_submit;
	dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);

	chan_dbg(chan, "LD %p allocated\n", desc);
507 | |
508 | return desc; |
509 | } |
510 | |
511 | /** |
 * xgene_dma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
514 | * @chan: X-Gene DMA channel |
515 | * |
516 | * This function is used on all completed and acked descriptors. |
517 | */ |
518 | static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan) |
519 | { |
520 | struct xgene_dma_desc_sw *desc, *_desc; |
521 | |
522 | /* Run the callback for each descriptor, in order */ |
523 | list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) { |
		if (async_tx_test_ack(&desc->tx))
525 | xgene_dma_clean_descriptor(chan, desc); |
526 | } |
527 | } |
528 | |
529 | /** |
530 | * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor |
531 | * @chan: X-Gene DMA channel |
532 | * @desc: descriptor to cleanup and free |
533 | * |
534 | * This function is used on a descriptor which has been executed by the DMA |
535 | * controller. It will run any callbacks, submit any dependencies. |
536 | */ |
537 | static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan, |
538 | struct xgene_dma_desc_sw *desc) |
539 | { |
540 | struct dma_async_tx_descriptor *tx = &desc->tx; |
541 | |
542 | /* |
543 | * If this is not the last transaction in the group, |
544 | * then no need to complete cookie and run any callback as |
545 | * this is not the tx_descriptor which had been sent to caller |
546 | * of this DMA request |
547 | */ |
548 | |
549 | if (tx->cookie == 0) |
550 | return; |
551 | |
552 | dma_cookie_complete(tx); |
553 | dma_descriptor_unmap(tx); |
554 | |
555 | /* Run the link descriptor callback function */ |
556 | dmaengine_desc_get_callback_invoke(tx, NULL); |
557 | |
558 | /* Run any dependencies */ |
559 | dma_run_dependencies(tx); |
560 | } |
561 | |
562 | /** |
563 | * xgene_dma_clean_running_descriptor - move the completed descriptor from |
564 | * ld_running to ld_completed |
565 | * @chan: X-Gene DMA channel |
566 | * @desc: the descriptor which is completed |
567 | * |
568 | * Free the descriptor directly if acked by async_tx api, |
569 | * else move it to queue ld_completed. |
570 | */ |
571 | static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, |
572 | struct xgene_dma_desc_sw *desc) |
573 | { |
574 | /* Remove from the list of running transactions */ |
	list_del(&desc->node);
576 | |
577 | /* |
578 | * the client is allowed to attach dependent operations |
579 | * until 'ack' is set |
580 | */ |
	if (!async_tx_test_ack(&desc->tx)) {
582 | /* |
583 | * Move this descriptor to the list of descriptors which is |
584 | * completed, but still awaiting the 'ack' bit to be set. |
585 | */ |
		list_add_tail(&desc->node, &chan->ld_completed);
587 | return; |
588 | } |
589 | |
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
592 | } |
593 | |
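/*
 * Queue one prepared sw descriptor (one or two 32 B hw slots) on the Tx
 * ring and tell the hw how many slots were written via the ring command
 * register.
 */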
594 | static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, |
595 | struct xgene_dma_desc_sw *desc_sw) |
596 | { |
597 | struct xgene_dma_ring *ring = &chan->tx_ring; |
598 | struct xgene_dma_desc_hw *desc_hw; |
599 | |
600 | /* Get hw descriptor from DMA tx ring */ |
601 | desc_hw = &ring->desc_hw[ring->head]; |
602 | |
	/*
	 * Advance the head index so that it points to the
	 * next descriptor slot for the next request
	 */
607 | if (++ring->head == ring->slots) |
608 | ring->head = 0; |
609 | |
610 | /* Copy prepared sw descriptor data to hw descriptor */ |
611 | memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw)); |
612 | |
	/*
	 * Check if we have prepared a 64B descriptor,
	 * in which case we need one more hw descriptor
	 */
617 | if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) { |
618 | desc_hw = &ring->desc_hw[ring->head]; |
619 | |
620 | if (++ring->head == ring->slots) |
621 | ring->head = 0; |
622 | |
623 | memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); |
624 | } |
625 | |
626 | /* Increment the pending transaction count */ |
627 | chan->pending += ((desc_sw->flags & |
628 | XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); |
629 | |
630 | /* Notify the hw that we have descriptor ready for execution */ |
631 | iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? |
632 | 2 : 1, ring->cmd); |
633 | } |
634 | |
635 | /** |
636 | * xgene_chan_xfer_ld_pending - push any pending transactions to hw |
637 | * @chan : X-Gene DMA channel |
638 | * |
639 | * LOCKING: must hold chan->lock |
640 | */ |
641 | static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) |
642 | { |
643 | struct xgene_dma_desc_sw *desc_sw, *_desc_sw; |
644 | |
645 | /* |
646 | * If the list of pending descriptors is empty, then we |
647 | * don't need to do any work at all |
648 | */ |
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "No pending LDs\n");
651 | return; |
652 | } |
653 | |
654 | /* |
655 | * Move elements from the queue of pending transactions onto the list |
656 | * of running transactions and push it to hw for further executions |
657 | */ |
658 | list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) { |
		/*
		 * Check if we have pushed the max number of transactions
		 * the hw can take; if so, stop here and push the remaining
		 * elements from the pending ld queue after some of the
		 * descriptors we have already pushed have completed
		 */
665 | if (chan->pending >= chan->max_outstanding) |
666 | return; |
667 | |
668 | xgene_chan_xfer_request(chan, desc_sw); |
669 | |
670 | /* |
671 | * Delete this element from ld pending queue and append it to |
672 | * ld running queue |
673 | */ |
		list_move_tail(&desc_sw->node, &chan->ld_running);
675 | } |
676 | } |
677 | |
678 | /** |
679 | * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed |
680 | * and move them to ld_completed to free until flag 'ack' is set |
681 | * @chan: X-Gene DMA channel |
682 | * |
683 | * This function is used on descriptors which have been executed by the DMA |
684 | * controller. It will run any callbacks, submit any dependencies, then |
685 | * free these descriptors if flag 'ack' is set. |
686 | */ |
687 | static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) |
688 | { |
689 | struct xgene_dma_ring *ring = &chan->rx_ring; |
690 | struct xgene_dma_desc_sw *desc_sw, *_desc_sw; |
691 | struct xgene_dma_desc_hw *desc_hw; |
692 | struct list_head ld_completed; |
693 | u8 status; |
694 | |
	INIT_LIST_HEAD(&ld_completed);

	spin_lock(&chan->lock);
698 | |
699 | /* Clean already completed and acked descriptors */ |
700 | xgene_dma_clean_completed_descriptor(chan); |
701 | |
702 | /* Move all completed descriptors to ld completed queue, in order */ |
703 | list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) { |
704 | /* Get subsequent hw descriptor from DMA rx ring */ |
705 | desc_hw = &ring->desc_hw[ring->head]; |
706 | |
707 | /* Check if this descriptor has been completed */ |
708 | if (unlikely(le64_to_cpu(desc_hw->m0) == |
709 | XGENE_DMA_DESC_EMPTY_SIGNATURE)) |
710 | break; |
711 | |
712 | if (++ring->head == ring->slots) |
713 | ring->head = 0; |
714 | |
715 | /* Check if we have any error with DMA transactions */ |
716 | status = XGENE_DMA_DESC_STATUS( |
717 | XGENE_DMA_DESC_ELERR_RD(le64_to_cpu( |
718 | desc_hw->m0)), |
719 | XGENE_DMA_DESC_LERR_RD(le64_to_cpu( |
720 | desc_hw->m0))); |
721 | if (status) { |
			/* Print the DMA error type */
			chan_err(chan, "%s\n", xgene_dma_desc_err[status]);

			/*
			 * We have a DMA transaction error here. Dump the DMA
			 * Tx and Rx descriptors for this request
			 */
			XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
					    "X-Gene DMA TX DESC1: ");

			if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
				XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
						    "X-Gene DMA TX DESC2: ");

			XGENE_DMA_DESC_DUMP(desc_hw,
					    "X-Gene DMA RX ERR DESC: ");
737 | } |
738 | |
739 | /* Notify the hw about this completed descriptor */ |
740 | iowrite32(-1, ring->cmd); |
741 | |
742 | /* Mark this hw descriptor as processed */ |
743 | desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); |
744 | |
745 | /* |
746 | * Decrement the pending transaction count |
747 | * as we have processed one |
748 | */ |
749 | chan->pending -= ((desc_sw->flags & |
750 | XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); |
751 | |
752 | /* |
753 | * Delete this node from ld running queue and append it to |
754 | * ld completed queue for further processing |
755 | */ |
		list_move_tail(&desc_sw->node, &ld_completed);
757 | } |
758 | |
759 | /* |
760 | * Start any pending transactions automatically |
761 | * In the ideal case, we keep the DMA controller busy while we go |
762 | * ahead and free the descriptors below. |
763 | */ |
764 | xgene_chan_xfer_ld_pending(chan); |
765 | |
	spin_unlock(&chan->lock);
767 | |
768 | /* Run the callback for each descriptor, in order */ |
769 | list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) { |
		xgene_dma_run_tx_complete_actions(chan, desc_sw);
		xgene_dma_clean_running_descriptor(chan, desc_sw);
772 | } |
773 | } |
774 | |
775 | static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan) |
776 | { |
777 | struct xgene_dma_chan *chan = to_dma_chan(dchan); |
778 | |
779 | /* Has this channel already been allocated? */ |
780 | if (chan->desc_pool) |
781 | return 1; |
782 | |
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct xgene_dma_desc_sw),
					  0, 0);
	if (!chan->desc_pool) {
		chan_err(chan, "Failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	chan_dbg(chan, "Allocate descriptor pool\n");
792 | |
793 | return 1; |
794 | } |
795 | |
796 | /** |
797 | * xgene_dma_free_desc_list - Free all descriptors in a queue |
798 | * @chan: X-Gene DMA channel |
799 | * @list: the list to free |
800 | * |
801 | * LOCKING: must hold chan->lock |
802 | */ |
803 | static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan, |
804 | struct list_head *list) |
805 | { |
806 | struct xgene_dma_desc_sw *desc, *_desc; |
807 | |
808 | list_for_each_entry_safe(desc, _desc, list, node) |
809 | xgene_dma_clean_descriptor(chan, desc); |
810 | } |
811 | |
812 | static void xgene_dma_free_chan_resources(struct dma_chan *dchan) |
813 | { |
814 | struct xgene_dma_chan *chan = to_dma_chan(dchan); |
815 | |
	chan_dbg(chan, "Free all resources\n");
817 | |
818 | if (!chan->desc_pool) |
819 | return; |
820 | |
821 | /* Process all running descriptor */ |
822 | xgene_dma_cleanup_descriptors(chan); |
823 | |
	spin_lock_bh(&chan->lock);

	/* Clean all link descriptor queues */
	xgene_dma_free_desc_list(chan, &chan->ld_pending);
	xgene_dma_free_desc_list(chan, &chan->ld_running);
	xgene_dma_free_desc_list(chan, &chan->ld_completed);

	spin_unlock_bh(&chan->lock);

	/* Delete this channel DMA pool */
	dma_pool_destroy(chan->desc_pool);
835 | chan->desc_pool = NULL; |
836 | } |
837 | |
838 | static struct dma_async_tx_descriptor *xgene_dma_prep_xor( |
839 | struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src, |
840 | u32 src_cnt, size_t len, unsigned long flags) |
841 | { |
842 | struct xgene_dma_desc_sw *first = NULL, *new; |
843 | struct xgene_dma_chan *chan; |
844 | static u8 multi[XGENE_DMA_MAX_XOR_SRC] = { |
845 | 0x01, 0x01, 0x01, 0x01, 0x01}; |
846 | |
847 | if (unlikely(!dchan || !len)) |
848 | return NULL; |
849 | |
850 | chan = to_dma_chan(dchan); |
851 | |
852 | do { |
853 | /* Allocate the link descriptor from DMA pool */ |
854 | new = xgene_dma_alloc_descriptor(chan); |
855 | if (!new) |
856 | goto fail; |
857 | |
858 | /* Prepare xor DMA descriptor */ |
		xgene_dma_prep_xor_desc(chan, new, &dst, src,
					src_cnt, &len, multi);
861 | |
862 | if (!first) |
863 | first = new; |
864 | |
865 | new->tx.cookie = 0; |
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
870 | } while (len); |
871 | |
872 | new->tx.flags = flags; /* client is in control of this ack */ |
873 | new->tx.cookie = -EBUSY; |
	list_splice(&first->tx_list, &new->tx_list);
875 | |
876 | return &new->tx; |
877 | |
878 | fail: |
879 | if (!first) |
880 | return NULL; |
881 | |
	xgene_dma_free_desc_list(chan, &first->tx_list);
883 | return NULL; |
884 | } |
885 | |
886 | static struct dma_async_tx_descriptor *xgene_dma_prep_pq( |
887 | struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src, |
888 | u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) |
889 | { |
890 | struct xgene_dma_desc_sw *first = NULL, *new; |
891 | struct xgene_dma_chan *chan; |
892 | size_t _len = len; |
893 | dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC]; |
894 | static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01}; |
895 | |
896 | if (unlikely(!dchan || !len)) |
897 | return NULL; |
898 | |
899 | chan = to_dma_chan(dchan); |
900 | |
	/*
	 * Save the source addresses in a local array; we may have to
	 * prepare two descriptor chains to generate both P and Q if
	 * both are enabled in the flags by the client
	 */
906 | memcpy(_src, src, sizeof(*src) * src_cnt); |
907 | |
908 | if (flags & DMA_PREP_PQ_DISABLE_P) |
909 | len = 0; |
910 | |
911 | if (flags & DMA_PREP_PQ_DISABLE_Q) |
912 | _len = 0; |
913 | |
914 | do { |
915 | /* Allocate the link descriptor from DMA pool */ |
916 | new = xgene_dma_alloc_descriptor(chan); |
917 | if (!new) |
918 | goto fail; |
919 | |
920 | if (!first) |
921 | first = new; |
922 | |
923 | new->tx.cookie = 0; |
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
928 | |
929 | /* |
930 | * Prepare DMA descriptor to generate P, |
931 | * if DMA_PREP_PQ_DISABLE_P flag is not set |
932 | */ |
933 | if (len) { |
			xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
						src_cnt, &len, multi);
936 | continue; |
937 | } |
938 | |
939 | /* |
940 | * Prepare DMA descriptor to generate Q, |
941 | * if DMA_PREP_PQ_DISABLE_Q flag is not set |
942 | */ |
943 | if (_len) { |
			xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
						src_cnt, &_len, scf);
946 | } |
947 | } while (len || _len); |
948 | |
949 | new->tx.flags = flags; /* client is in control of this ack */ |
950 | new->tx.cookie = -EBUSY; |
	list_splice(&first->tx_list, &new->tx_list);
952 | |
953 | return &new->tx; |
954 | |
955 | fail: |
956 | if (!first) |
957 | return NULL; |
958 | |
	xgene_dma_free_desc_list(chan, &first->tx_list);
960 | return NULL; |
961 | } |
962 | |
963 | static void xgene_dma_issue_pending(struct dma_chan *dchan) |
964 | { |
965 | struct xgene_dma_chan *chan = to_dma_chan(dchan); |
966 | |
	spin_lock_bh(&chan->lock);
	xgene_chan_xfer_ld_pending(chan);
	spin_unlock_bh(&chan->lock);
970 | } |
971 | |
972 | static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan, |
973 | dma_cookie_t cookie, |
974 | struct dma_tx_state *txstate) |
975 | { |
	return dma_cookie_status(dchan, cookie, txstate);
977 | } |
978 | |
979 | static void xgene_dma_tasklet_cb(struct tasklet_struct *t) |
980 | { |
981 | struct xgene_dma_chan *chan = from_tasklet(chan, t, tasklet); |
982 | |
983 | /* Run all cleanup for descriptors which have been completed */ |
984 | xgene_dma_cleanup_descriptors(chan); |
985 | |
986 | /* Re-enable DMA channel IRQ */ |
	enable_irq(chan->rx_irq);
988 | } |
989 | |
990 | static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id) |
991 | { |
992 | struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id; |
993 | |
994 | BUG_ON(!chan); |
995 | |
996 | /* |
997 | * Disable DMA channel IRQ until we process completed |
998 | * descriptors |
999 | */ |
	disable_irq_nosync(chan->rx_irq);
1001 | |
1002 | /* |
1003 | * Schedule the tasklet to handle all cleanup of the current |
1004 | * transaction. It will start a new transaction if there is |
1005 | * one pending. |
1006 | */ |
	tasklet_schedule(&chan->tasklet);
1008 | |
1009 | return IRQ_HANDLED; |
1010 | } |
1011 | |
1012 | static irqreturn_t xgene_dma_err_isr(int irq, void *id) |
1013 | { |
1014 | struct xgene_dma *pdma = (struct xgene_dma *)id; |
1015 | unsigned long int_mask; |
1016 | u32 val, i; |
1017 | |
1018 | val = ioread32(pdma->csr_dma + XGENE_DMA_INT); |
1019 | |
1020 | /* Clear DMA interrupts */ |
1021 | iowrite32(val, pdma->csr_dma + XGENE_DMA_INT); |
1022 | |
1023 | /* Print DMA error info */ |
1024 | int_mask = val >> XGENE_DMA_INT_MASK_SHIFT; |
1025 | for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err)) |
1026 | dev_err(pdma->dev, |
			"Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);
1028 | |
1029 | return IRQ_HANDLED; |
1030 | } |
1031 | |
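/*
 * The ring configuration is kept as a five-word shadow in ring->state;
 * select the ring via the STATE register, then write the words out
 * through the WR_BASE window.
 */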
1032 | static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring) |
1033 | { |
1034 | int i; |
1035 | |
1036 | iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE); |
1037 | |
1038 | for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++) |
1039 | iowrite32(ring->state[i], ring->pdma->csr_ring + |
1040 | XGENE_DMA_RING_STATE_WR_BASE + (i * 4)); |
1041 | } |
1042 | |
1043 | static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring) |
1044 | { |
1045 | memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG); |
1046 | xgene_dma_wr_ring_state(ring); |
1047 | } |
1048 | |
1049 | static void xgene_dma_setup_ring(struct xgene_dma_ring *ring) |
1050 | { |
1051 | void *ring_cfg = ring->state; |
1052 | u64 addr = ring->desc_paddr; |
1053 | u32 i, val; |
1054 | |
1055 | ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE; |
1056 | |
1057 | /* Clear DMA ring state */ |
1058 | xgene_dma_clr_ring_state(ring); |
1059 | |
1060 | /* Set DMA ring type */ |
1061 | XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR); |
1062 | |
1063 | if (ring->owner == XGENE_DMA_RING_OWNER_DMA) { |
1064 | /* Set recombination buffer and timeout */ |
1065 | XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg); |
1066 | XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg); |
1067 | XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg); |
1068 | } |
1069 | |
1070 | /* Initialize DMA ring state */ |
1071 | XGENE_DMA_RING_SELTHRSH_SET(ring_cfg); |
1072 | XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg); |
1073 | XGENE_DMA_RING_COHERENT_SET(ring_cfg); |
1074 | XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr); |
1075 | XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr); |
1076 | XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize); |
1077 | |
1078 | /* Write DMA ring configurations */ |
1079 | xgene_dma_wr_ring_state(ring); |
1080 | |
1081 | /* Set DMA ring id */ |
1082 | iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id), |
1083 | ring->pdma->csr_ring + XGENE_DMA_RING_ID); |
1084 | |
1085 | /* Set DMA ring buffer */ |
1086 | iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num), |
1087 | ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF); |
1088 | |
1089 | if (ring->owner != XGENE_DMA_RING_OWNER_CPU) |
1090 | return; |
1091 | |
1092 | /* Set empty signature to DMA Rx ring descriptors */ |
1093 | for (i = 0; i < ring->slots; i++) { |
1094 | struct xgene_dma_desc_hw *desc; |
1095 | |
1096 | desc = &ring->desc_hw[i]; |
1097 | desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); |
1098 | } |
1099 | |
1100 | /* Enable DMA Rx ring interrupt */ |
1101 | val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE); |
1102 | XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num); |
1103 | iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE); |
1104 | } |
1105 | |
1106 | static void xgene_dma_clear_ring(struct xgene_dma_ring *ring) |
1107 | { |
1108 | u32 ring_id, val; |
1109 | |
1110 | if (ring->owner == XGENE_DMA_RING_OWNER_CPU) { |
1111 | /* Disable DMA Rx ring interrupt */ |
1112 | val = ioread32(ring->pdma->csr_ring + |
1113 | XGENE_DMA_RING_NE_INT_MODE); |
1114 | XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num); |
1115 | iowrite32(val, ring->pdma->csr_ring + |
1116 | XGENE_DMA_RING_NE_INT_MODE); |
1117 | } |
1118 | |
1119 | /* Clear DMA ring state */ |
1120 | ring_id = XGENE_DMA_RING_ID_SETUP(ring->id); |
1121 | iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID); |
1122 | |
1123 | iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF); |
1124 | xgene_dma_clr_ring_state(ring); |
1125 | } |
1126 | |
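/*
 * Each ring's command region appears to sit at a 64 B stride from the
 * ring-command CSR base (plus the state-machine offset applied at probe
 * time); the doorbell register used by the driver is 0x2C into that
 * region.
 */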
1127 | static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring) |
1128 | { |
1129 | ring->cmd_base = ring->pdma->csr_ring_cmd + |
1130 | XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num - |
1131 | XGENE_DMA_RING_NUM)); |
1132 | |
1133 | ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET; |
1134 | } |
1135 | |
1136 | static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan, |
1137 | enum xgene_dma_ring_cfgsize cfgsize) |
1138 | { |
1139 | int size; |
1140 | |
1141 | switch (cfgsize) { |
1142 | case XGENE_DMA_RING_CFG_SIZE_512B: |
1143 | size = 0x200; |
1144 | break; |
1145 | case XGENE_DMA_RING_CFG_SIZE_2KB: |
1146 | size = 0x800; |
1147 | break; |
1148 | case XGENE_DMA_RING_CFG_SIZE_16KB: |
1149 | size = 0x4000; |
1150 | break; |
1151 | case XGENE_DMA_RING_CFG_SIZE_64KB: |
1152 | size = 0x10000; |
1153 | break; |
1154 | case XGENE_DMA_RING_CFG_SIZE_512KB: |
1155 | size = 0x80000; |
1156 | break; |
1157 | default: |
		chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
1159 | return -EINVAL; |
1160 | } |
1161 | |
1162 | return size; |
1163 | } |
1164 | |
1165 | static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring) |
1166 | { |
1167 | /* Clear DMA ring configurations */ |
1168 | xgene_dma_clear_ring(ring); |
1169 | |
1170 | /* De-allocate DMA ring descriptor */ |
1171 | if (ring->desc_vaddr) { |
		dma_free_coherent(ring->pdma->dev, ring->size,
				  ring->desc_vaddr, ring->desc_paddr);
1174 | ring->desc_vaddr = NULL; |
1175 | } |
1176 | } |
1177 | |
1178 | static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan) |
1179 | { |
	xgene_dma_delete_ring_one(&chan->rx_ring);
	xgene_dma_delete_ring_one(&chan->tx_ring);
1182 | } |
1183 | |
1184 | static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, |
1185 | struct xgene_dma_ring *ring, |
1186 | enum xgene_dma_ring_cfgsize cfgsize) |
1187 | { |
1188 | int ret; |
1189 | |
1190 | /* Setup DMA ring descriptor variables */ |
1191 | ring->pdma = chan->pdma; |
1192 | ring->cfgsize = cfgsize; |
1193 | ring->num = chan->pdma->ring_num++; |
1194 | ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); |
1195 | |
1196 | ret = xgene_dma_get_ring_size(chan, cfgsize); |
1197 | if (ret <= 0) |
1198 | return ret; |
1199 | ring->size = ret; |
1200 | |
1201 | /* Allocate memory for DMA ring descriptor */ |
	ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
					      &ring->desc_paddr, GFP_KERNEL);
	if (!ring->desc_vaddr) {
		chan_err(chan, "Failed to allocate ring desc\n");
1206 | return -ENOMEM; |
1207 | } |
1208 | |
1209 | /* Configure and enable DMA ring */ |
1210 | xgene_dma_set_ring_cmd(ring); |
1211 | xgene_dma_setup_ring(ring); |
1212 | |
1213 | return 0; |
1214 | } |
1215 | |
1216 | static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) |
1217 | { |
1218 | struct xgene_dma_ring *rx_ring = &chan->rx_ring; |
1219 | struct xgene_dma_ring *tx_ring = &chan->tx_ring; |
1220 | int ret; |
1221 | |
1222 | /* Create DMA Rx ring descriptor */ |
1223 | rx_ring->owner = XGENE_DMA_RING_OWNER_CPU; |
1224 | rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id; |
1225 | |
	ret = xgene_dma_create_ring_one(chan, rx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret)
		return ret;

	chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
		 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
1233 | |
1234 | /* Create DMA Tx ring descriptor */ |
1235 | tx_ring->owner = XGENE_DMA_RING_OWNER_DMA; |
1236 | tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id; |
1237 | |
	ret = xgene_dma_create_ring_one(chan, tx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret) {
		xgene_dma_delete_ring_one(rx_ring);
1242 | return ret; |
1243 | } |
1244 | |
1245 | tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num); |
1246 | |
	chan_dbg(chan,
		 "Tx ring id 0x%X num %d desc 0x%p\n",
		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
1250 | |
1251 | /* Set the max outstanding request possible to this channel */ |
1252 | chan->max_outstanding = tx_ring->slots; |
1253 | |
1254 | return ret; |
1255 | } |
1256 | |
1257 | static int xgene_dma_init_rings(struct xgene_dma *pdma) |
1258 | { |
1259 | int ret, i, j; |
1260 | |
1261 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { |
		ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
		if (ret) {
			for (j = 0; j < i; j++)
				xgene_dma_delete_chan_rings(&pdma->chan[j]);
1266 | return ret; |
1267 | } |
1268 | } |
1269 | |
1270 | return ret; |
1271 | } |
1272 | |
1273 | static void xgene_dma_enable(struct xgene_dma *pdma) |
1274 | { |
1275 | u32 val; |
1276 | |
1277 | /* Configure and enable DMA engine */ |
1278 | val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); |
1279 | XGENE_DMA_CH_SETUP(val); |
1280 | XGENE_DMA_ENABLE(val); |
1281 | iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); |
1282 | } |
1283 | |
1284 | static void xgene_dma_disable(struct xgene_dma *pdma) |
1285 | { |
1286 | u32 val; |
1287 | |
1288 | val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); |
1289 | XGENE_DMA_DISABLE(val); |
1290 | iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); |
1291 | } |
1292 | |
1293 | static void xgene_dma_mask_interrupts(struct xgene_dma *pdma) |
1294 | { |
1295 | /* |
1296 | * Mask DMA ring overflow, underflow and |
1297 | * AXI write/read error interrupts |
1298 | */ |
1299 | iowrite32(XGENE_DMA_INT_ALL_MASK, |
1300 | pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); |
1301 | iowrite32(XGENE_DMA_INT_ALL_MASK, |
1302 | pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); |
1303 | iowrite32(XGENE_DMA_INT_ALL_MASK, |
1304 | pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); |
1305 | iowrite32(XGENE_DMA_INT_ALL_MASK, |
1306 | pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); |
1307 | iowrite32(XGENE_DMA_INT_ALL_MASK, |
1308 | pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); |
1309 | |
1310 | /* Mask DMA error interrupts */ |
1311 | iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK); |
1312 | } |
1313 | |
1314 | static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma) |
1315 | { |
1316 | /* |
1317 | * Unmask DMA ring overflow, underflow and |
1318 | * AXI write/read error interrupts |
1319 | */ |
1320 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, |
1321 | pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); |
1322 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, |
1323 | pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); |
1324 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, |
1325 | pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); |
1326 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, |
1327 | pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); |
1328 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, |
1329 | pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); |
1330 | |
1331 | /* Unmask DMA error interrupts */ |
1332 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, |
1333 | pdma->csr_dma + XGENE_DMA_INT_MASK); |
1334 | } |
1335 | |
1336 | static void xgene_dma_init_hw(struct xgene_dma *pdma) |
1337 | { |
1338 | u32 val; |
1339 | |
1340 | /* Associate DMA ring to corresponding ring HW */ |
1341 | iowrite32(XGENE_DMA_ASSOC_RING_MNGR1, |
1342 | pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC); |
1343 | |
1344 | /* Configure RAID6 polynomial control setting */ |
1345 | if (is_pq_enabled(pdma)) |
1346 | iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D), |
1347 | pdma->csr_dma + XGENE_DMA_RAID6_CONT); |
1348 | else |
		dev_info(pdma->dev, "PQ is disabled in HW\n");
1350 | |
1351 | xgene_dma_enable(pdma); |
1352 | xgene_dma_unmask_interrupts(pdma); |
1353 | |
1354 | /* Get DMA id and version info */ |
1355 | val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR); |
1356 | |
1357 | /* DMA device info */ |
1358 | dev_info(pdma->dev, |
		 "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
1360 | XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val), |
1361 | XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL); |
1362 | } |
1363 | |
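/*
 * Bring the ring manager block out of reset and release its RAM from
 * shutdown. If the clock is already enabled and reset already released
 * (e.g. by firmware), the block is assumed to be initialized and the
 * sequence is skipped.
 */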
1364 | static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma) |
1365 | { |
1366 | if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) && |
1367 | (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST))) |
1368 | return 0; |
1369 | |
1370 | iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN); |
1371 | iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST); |
1372 | |
1373 | /* Bring up memory */ |
1374 | iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); |
1375 | |
1376 | /* Force a barrier */ |
1377 | ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); |
1378 | |
1379 | /* reset may take up to 1ms */ |
	usleep_range(1000, 1100);
1381 | |
1382 | if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY) |
1383 | != XGENE_DMA_RING_BLK_MEM_RDY_VAL) { |
1384 | dev_err(pdma->dev, |
			"Failed to release ring mngr memory from shutdown\n");
1386 | return -ENODEV; |
1387 | } |
1388 | |
1389 | /* program threshold set 1 and all hysteresis */ |
1390 | iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL, |
1391 | pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1); |
1392 | iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL, |
1393 | pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1); |
1394 | iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL, |
1395 | pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS); |
1396 | |
1397 | /* Enable QPcore and assign error queue */ |
1398 | iowrite32(XGENE_DMA_RING_ENABLE, |
1399 | pdma->csr_ring + XGENE_DMA_RING_CONFIG); |
1400 | |
1401 | return 0; |
1402 | } |
1403 | |
1404 | static int xgene_dma_init_mem(struct xgene_dma *pdma) |
1405 | { |
1406 | int ret; |
1407 | |
1408 | ret = xgene_dma_init_ring_mngr(pdma); |
1409 | if (ret) |
1410 | return ret; |
1411 | |
1412 | /* Bring up memory */ |
1413 | iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); |
1414 | |
1415 | /* Force a barrier */ |
1416 | ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); |
1417 | |
1418 | /* reset may take up to 1ms */ |
	usleep_range(1000, 1100);
1420 | |
1421 | if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY) |
1422 | != XGENE_DMA_BLK_MEM_RDY_VAL) { |
1423 | dev_err(pdma->dev, |
			"Failed to release DMA memory from shutdown\n");
1425 | return -ENODEV; |
1426 | } |
1427 | |
1428 | return 0; |
1429 | } |
1430 | |
1431 | static int xgene_dma_request_irqs(struct xgene_dma *pdma) |
1432 | { |
1433 | struct xgene_dma_chan *chan; |
1434 | int ret, i, j; |
1435 | |
1436 | /* Register DMA error irq */ |
	ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
			       0, "dma_error", pdma);
	if (ret) {
		dev_err(pdma->dev,
			"Failed to register error IRQ %d\n", pdma->err_irq);
1442 | return ret; |
1443 | } |
1444 | |
1445 | /* Register DMA channel rx irq */ |
1446 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { |
1447 | chan = &pdma->chan[i]; |
		irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(chan->dev, chan->rx_irq,
				       xgene_dma_chan_ring_isr,
				       0, chan->name, chan);
		if (ret) {
			chan_err(chan, "Failed to register Rx IRQ %d\n",
				 chan->rx_irq);
			devm_free_irq(pdma->dev, pdma->err_irq, pdma);

			for (j = 0; j < i; j++) {
				chan = &pdma->chan[j];
				irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
				devm_free_irq(chan->dev, chan->rx_irq, chan);
1461 | } |
1462 | |
1463 | return ret; |
1464 | } |
1465 | } |
1466 | |
1467 | return 0; |
1468 | } |
1469 | |
1470 | static void xgene_dma_free_irqs(struct xgene_dma *pdma) |
1471 | { |
1472 | struct xgene_dma_chan *chan; |
1473 | int i; |
1474 | |
1475 | /* Free DMA device error irq */ |
	devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1477 | |
1478 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { |
1479 | chan = &pdma->chan[i]; |
		irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(chan->dev, chan->rx_irq, chan);
1482 | } |
1483 | } |
1484 | |
1485 | static void xgene_dma_set_caps(struct xgene_dma_chan *chan, |
1486 | struct dma_device *dma_dev) |
1487 | { |
1488 | /* Initialize DMA device capability mask */ |
1489 | dma_cap_zero(dma_dev->cap_mask); |
1490 | |
1491 | /* Set DMA device capability */ |
1492 | |
	/*
	 * Here, the X-Gene SoC DMA engine channel 0 supports XOR and
	 * channel 1 supports both XOR and PQ. First, the hw provides a
	 * mechanism to enable/disable PQ/XOR support on channel 1, which
	 * we can check by reading the SoC efuse register. Second, there
	 * is a hw erratum: if channel 0 and channel 1 simultaneously
	 * execute XOR and PQ requests, the DMA engine hangs. So we enable
	 * XOR on channel 0 only if XOR and PQ support on channel 1 is
	 * disabled.
	 */
	if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
	    is_pq_enabled(chan->pdma)) {
		dma_cap_set(DMA_PQ, dma_dev->cap_mask);
		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	} else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
		   !is_pq_enabled(chan->pdma)) {
1508 | dma_cap_set(DMA_XOR, dma_dev->cap_mask); |
1509 | } |
1510 | |
1511 | /* Set base and prep routines */ |
1512 | dma_dev->dev = chan->dev; |
1513 | dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources; |
1514 | dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; |
1515 | dma_dev->device_issue_pending = xgene_dma_issue_pending; |
1516 | dma_dev->device_tx_status = xgene_dma_tx_status; |
1517 | |
1518 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1519 | dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; |
1520 | dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC; |
1521 | dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES; |
1522 | } |
1523 | |
1524 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { |
1525 | dma_dev->device_prep_dma_pq = xgene_dma_prep_pq; |
1526 | dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC; |
1527 | dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES; |
1528 | } |
1529 | } |
1530 | |
1531 | static int xgene_dma_async_register(struct xgene_dma *pdma, int id) |
1532 | { |
1533 | struct xgene_dma_chan *chan = &pdma->chan[id]; |
1534 | struct dma_device *dma_dev = &pdma->dma_dev[id]; |
1535 | int ret; |
1536 | |
1537 | chan->dma_chan.device = dma_dev; |
1538 | |
1539 | spin_lock_init(&chan->lock); |
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	tasklet_setup(&chan->tasklet, xgene_dma_tasklet_cb);

	chan->pending = 0;
	chan->desc_pool = NULL;
	dma_cookie_init(&chan->dma_chan);
1548 | |
1549 | /* Setup dma device capabilities and prep routines */ |
1550 | xgene_dma_set_caps(chan, dma_dev); |
1551 | |
1552 | /* Initialize DMA device list head */ |
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
1558 | if (ret) { |
		chan_err(chan, "Failed to register async device %d", ret);
		tasklet_kill(&chan->tasklet);
1561 | |
1562 | return ret; |
1563 | } |
1564 | |
1565 | /* DMA capability info */ |
1566 | dev_info(pdma->dev, |
		 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
1570 | |
1571 | return 0; |
1572 | } |
1573 | |
1574 | static int xgene_dma_init_async(struct xgene_dma *pdma) |
1575 | { |
1576 | int ret, i, j; |
1577 | |
1578 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL ; i++) { |
		ret = xgene_dma_async_register(pdma, i);
1580 | if (ret) { |
1581 | for (j = 0; j < i; j++) { |
				dma_async_device_unregister(&pdma->dma_dev[j]);
				tasklet_kill(&pdma->chan[j].tasklet);
1584 | } |
1585 | |
1586 | return ret; |
1587 | } |
1588 | } |
1589 | |
1590 | return ret; |
1591 | } |
1592 | |
1593 | static void xgene_dma_async_unregister(struct xgene_dma *pdma) |
1594 | { |
1595 | int i; |
1596 | |
1597 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) |
		dma_async_device_unregister(&pdma->dma_dev[i]);
1599 | } |
1600 | |
1601 | static void xgene_dma_init_channels(struct xgene_dma *pdma) |
1602 | { |
1603 | struct xgene_dma_chan *chan; |
1604 | int i; |
1605 | |
1606 | pdma->ring_num = XGENE_DMA_RING_NUM; |
1607 | |
1608 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { |
1609 | chan = &pdma->chan[i]; |
1610 | chan->dev = pdma->dev; |
1611 | chan->pdma = pdma; |
1612 | chan->id = i; |
1613 | snprintf(buf: chan->name, size: sizeof(chan->name), fmt: "dmachan%d" , chan->id); |
1614 | } |
1615 | } |
1616 | |
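/*
 * xgene_dma_get_resources - map register regions and fetch interrupts
 * @pdev: platform device
 * @pdma: X-Gene DMA device data
 *
 * The device exposes four MMIO regions (DMA csr, ring csr, ring cmd csr
 * and efuse csr) and one error interrupt followed by one Rx ring
 * descriptor interrupt per channel, in that order.
 */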
static int xgene_dma_get_resources(struct platform_device *pdev,
				   struct xgene_dma *pdma)
{
	struct resource *res;
	int irq, i;

	/* Get DMA csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get csr region\n");
		return -ENXIO;
	}

	pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!pdma->csr_dma) {
		dev_err(&pdev->dev, "Failed to ioremap csr region");
		return -ENOMEM;
	}

	/* Get DMA ring csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get ring csr region\n");
		return -ENXIO;
	}

	pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!pdma->csr_ring) {
		dev_err(&pdev->dev, "Failed to ioremap ring csr region");
		return -ENOMEM;
	}

	/* Get DMA ring cmd csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
		return -ENXIO;
	}

	pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!pdma->csr_ring_cmd) {
		dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
		return -ENOMEM;
	}

	pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;

	/* Get efuse csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get efuse csr region\n");
		return -ENXIO;
	}

	pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
				       resource_size(res));
	if (!pdma->csr_efuse) {
		dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
		return -ENOMEM;
	}

	/* Get DMA error interrupt */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -ENXIO;

	pdma->err_irq = irq;

	/* Get DMA Rx ring descriptor interrupts for all DMA channels */
	for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq <= 0)
			return -ENXIO;

		pdma->chan[i - 1].rx_irq = irq;
	}

	return 0;
}

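/*
 * Probe sequence: map resources, enable the clock when one is provided
 * (ACPI systems may not have one), bring the descriptor RAM out of
 * shutdown, set the DMA mask, initialize channel state and rings,
 * request IRQs, enable the engine and finally register the channels
 * with the async DMA framework.
 */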
static int xgene_dma_probe(struct platform_device *pdev)
{
	struct xgene_dma *pdma;
	int ret, i;

	pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->dev = &pdev->dev;
	platform_set_drvdata(pdev, pdma);

	ret = xgene_dma_get_resources(pdev, pdma);
	if (ret)
		return ret;

	pdma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
		dev_err(&pdev->dev, "Failed to get clk\n");
		return PTR_ERR(pdma->clk);
	}

	/* Enable clk before accessing registers */
	if (!IS_ERR(pdma->clk)) {
		ret = clk_prepare_enable(pdma->clk);
		if (ret) {
			dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
			return ret;
		}
	}

	/* Remove DMA RAM out of shutdown */
	ret = xgene_dma_init_mem(pdma);
	if (ret)
		goto err_clk_enable;

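	/*
	 * Constrain both streaming and coherent mappings to 42-bit DMA
	 * addresses, which is as much as this engine is expected to address.
	 */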
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		goto err_dma_mask;
	}

	/* Initialize DMA channels software state */
	xgene_dma_init_channels(pdma);

	/* Configure DMA rings */
	ret = xgene_dma_init_rings(pdma);
	if (ret)
		goto err_clk_enable;

	ret = xgene_dma_request_irqs(pdma);
	if (ret)
		goto err_request_irq;

	/* Configure and enable DMA engine */
	xgene_dma_init_hw(pdma);

	/* Register DMA device with linux async framework */
	ret = xgene_dma_init_async(pdma);
	if (ret)
		goto err_async_init;

	return 0;

err_async_init:
	xgene_dma_free_irqs(pdma);

err_request_irq:
	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
		xgene_dma_delete_chan_rings(&pdma->chan[i]);

err_dma_mask:
err_clk_enable:
	if (!IS_ERR(pdma->clk))
		clk_disable_unprepare(pdma->clk);

	return ret;
}

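/*
 * Teardown mirrors probe: unregister the channels first so no new
 * transactions can be issued, then quiesce the hardware, release the
 * IRQs, kill the per-channel tasklets, delete the rings and finally
 * gate the clock.
 */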
static void xgene_dma_remove(struct platform_device *pdev)
{
	struct xgene_dma *pdma = platform_get_drvdata(pdev);
	struct xgene_dma_chan *chan;
	int i;

	xgene_dma_async_unregister(pdma);

	/* Mask interrupts and disable DMA engine */
	xgene_dma_mask_interrupts(pdma);
	xgene_dma_disable(pdma);
	xgene_dma_free_irqs(pdma);

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		tasklet_kill(&chan->tasklet);
		xgene_dma_delete_chan_rings(chan);
	}

	if (!IS_ERR(pdma->clk))
		clk_disable_unprepare(pdma->clk);
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
	{"APMC0D43", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
#endif

static const struct of_device_id xgene_dma_of_match_ptr[] = {
	{.compatible = "apm,xgene-storm-dma",},
	{},
};
MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);

static struct platform_driver xgene_dma_driver = {
	.probe = xgene_dma_probe,
	.remove_new = xgene_dma_remove,
	.driver = {
		.name = "X-Gene-DMA",
		.of_match_table = xgene_dma_of_match_ptr,
		.acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
	},
};

module_platform_driver(xgene_dma_driver);

MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
MODULE_AUTHOR("Loc Ho <lho@apm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");