// SPDX-License-Identifier: GPL-2.0
/*
 * PLX ExpressLane PEX PCI Switch DMA Engine Driver
 * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
 * Copyright (c) 2019, GigaIO Networks, Inc
 */

#include "dmaengine.h"

#include <linux/circ_buf.h>
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Logan Gunthorpe");
21 | |
22 | #define PLX_REG_DESC_RING_ADDR 0x214 |
23 | #define PLX_REG_DESC_RING_ADDR_HI 0x218 |
24 | #define PLX_REG_DESC_RING_NEXT_ADDR 0x21C |
25 | #define PLX_REG_DESC_RING_COUNT 0x220 |
26 | #define PLX_REG_DESC_RING_LAST_ADDR 0x224 |
27 | #define PLX_REG_DESC_RING_LAST_SIZE 0x228 |
28 | #define PLX_REG_PREF_LIMIT 0x234 |
29 | #define PLX_REG_CTRL 0x238 |
30 | #define PLX_REG_CTRL2 0x23A |
31 | #define PLX_REG_INTR_CTRL 0x23C |
32 | #define PLX_REG_INTR_STATUS 0x23E |
33 | |
34 | #define PLX_REG_PREF_LIMIT_PREF_FOUR 8 |
35 | |
36 | #define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0) |
37 | #define PLX_REG_CTRL_ABORT BIT(1) |
38 | #define PLX_REG_CTRL_WRITE_BACK_EN BIT(2) |
39 | #define PLX_REG_CTRL_START BIT(3) |
40 | #define PLX_REG_CTRL_RING_STOP_MODE BIT(4) |
41 | #define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5) |
42 | #define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5) |
43 | #define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5) |
44 | #define PLX_REG_CTRL_DESC_INVALID BIT(8) |
45 | #define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9) |
46 | #define PLX_REG_CTRL_ABORT_DONE BIT(10) |
47 | #define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12) |
48 | #define PLX_REG_CTRL_IN_PROGRESS BIT(30) |
49 | |
50 | #define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \ |
51 | PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \ |
52 | PLX_REG_CTRL_ABORT_DONE | \ |
53 | PLX_REG_CTRL_IMM_PAUSE_DONE) |
54 | |
55 | #define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \ |
56 | PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \ |
57 | PLX_REG_CTRL_START | \ |
58 | PLX_REG_CTRL_RESET_VAL) |
59 | |
60 | #define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0 |
61 | #define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1 |
62 | #define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2 |
63 | #define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3 |
64 | #define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4 |
65 | #define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5 |
66 | #define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7 |
67 | |
68 | #define PLX_REG_INTR_CRTL_ERROR_EN BIT(0) |
69 | #define PLX_REG_INTR_CRTL_INV_DESC_EN BIT(1) |
70 | #define PLX_REG_INTR_CRTL_ABORT_DONE_EN BIT(3) |
71 | #define PLX_REG_INTR_CRTL_PAUSE_DONE_EN BIT(4) |
72 | #define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN BIT(5) |
73 | |
74 | #define PLX_REG_INTR_STATUS_ERROR BIT(0) |
75 | #define PLX_REG_INTR_STATUS_INV_DESC BIT(1) |
76 | #define PLX_REG_INTR_STATUS_DESC_DONE BIT(2) |
77 | #define PLX_REG_INTR_CRTL_ABORT_DONE BIT(3) |
78 | |
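/*
 * Hardware descriptor layout consumed by the DMA engine: the low 27
 * bits of flags_and_size hold the transfer size, the top bits carry
 * the valid/interrupt flags and, once write back has occurred, the
 * completion status bits defined below.
 */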
struct plx_dma_hw_std_desc {
	__le32 flags_and_size;
	__le16 dst_addr_hi;
	__le16 src_addr_hi;
	__le32 dst_addr_lo;
	__le32 src_addr_lo;
};

#define PLX_DESC_SIZE_MASK		0x7ffffff
#define PLX_DESC_FLAG_VALID		BIT(31)
#define PLX_DESC_FLAG_INT_WHEN_DONE	BIT(30)

#define PLX_DESC_WB_SUCCESS		BIT(30)
#define PLX_DESC_WB_RD_FAIL		BIT(29)
#define PLX_DESC_WB_WR_FAIL		BIT(28)

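/* Must be a power of two: ring indices are masked in plx_dma_get_desc() */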
#define PLX_DMA_RING_COUNT		2048

struct plx_dma_desc {
	struct dma_async_tx_descriptor txd;
	struct plx_dma_hw_std_desc *hw;
	u32 orig_size;
};

struct plx_dma_dev {
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	struct pci_dev __rcu *pdev;
	void __iomem *bar;
	struct tasklet_struct desc_task;

	spinlock_t ring_lock;
	bool ring_active;
	int head;
	int tail;
	struct plx_dma_hw_std_desc *hw_ring;
	dma_addr_t hw_ring_dma;
	struct plx_dma_desc **desc_ring;
};

static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
{
	return container_of(c, struct plx_dma_dev, dma_chan);
}

static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct plx_dma_desc, txd);
}

static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
{
	return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
}

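/*
 * Complete descriptors from tail to head. A descriptor is finished
 * once the hardware write back has cleared its valid bit; the first
 * still-valid descriptor ends the scan.
 */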
static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;
	u32 flags;

	spin_lock(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));

		if (flags & PLX_DESC_FLAG_VALID)
			break;

		res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);

		if (flags & PLX_DESC_WB_SUCCESS)
			res.result = DMA_TRANS_NOERROR;
		else if (flags & PLX_DESC_WB_WR_FAIL)
			res.result = DMA_TRANS_WRITE_FAILED;
		else
			res.result = DMA_TRANS_READ_FAILED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock(&plxdev->ring_lock);
}

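/*
 * Complete anything the hardware already finished, then fail all
 * remaining outstanding descriptors with DMA_TRANS_ABORTED. Callers
 * stop the engine before invoking this.
 */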
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;

	plx_dma_process_desc(plxdev);

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		res.residue = desc->orig_size;
		res.result = DMA_TRANS_ABORTED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}

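/*
 * Request a graceful pause, poll up to a second for the hardware to
 * acknowledge it, then clear the ring registers. Callers must ensure
 * the PCI device is still present (see plx_dma_stop()).
 */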
static void __plx_dma_stop(struct plx_dma_dev *plxdev)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	u32 val;

	val = readl(plxdev->bar + PLX_REG_CTRL);
	if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
		return;

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	while (!time_after(jiffies, timeout)) {
		val = readl(plxdev->bar + PLX_REG_CTRL);
		if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
			break;

		cpu_relax();
	}

	if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
		dev_err(plxdev->dma_dev.dev,
			"Timeout waiting for graceful pause!\n");

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
}

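/*
 * Stop the engine only while the PCI device is still around; the RCU
 * protected pdev pointer is cleared in plx_dma_remove().
 */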
static void plx_dma_stop(struct plx_dma_dev *plxdev)
{
	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	__plx_dma_stop(plxdev);

	rcu_read_unlock();
}

static void plx_dma_desc_task(struct tasklet_struct *t)
{
	struct plx_dma_dev *plxdev = from_tasklet(plxdev, t, desc_task);

	plx_dma_process_desc(plxdev);
}

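/*
 * Fill in one hardware descriptor for the copy. The valid bit is not
 * set here; that happens in tx_submit, which is also why this
 * function deliberately returns with ring_lock still held.
 */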
static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
		dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
		unsigned long flags)
	__acquires(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
	struct plx_dma_desc *plxdesc;

	spin_lock_bh(&plxdev->ring_lock);
	if (!plxdev->ring_active)
		goto err_unlock;

	if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
		goto err_unlock;

	if (len > PLX_DESC_SIZE_MASK)
		goto err_unlock;

	plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
	plxdev->head++;

	plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
	plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
	plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
	plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));

	plxdesc->orig_size = len;

	if (flags & DMA_PREP_INTERRUPT)
		len |= PLX_DESC_FLAG_INT_WHEN_DONE;

	plxdesc->hw->flags_and_size = cpu_to_le32(len);
	plxdesc->txd.flags = flags;

	/* return with the lock held, it will be released in tx_submit */

	return &plxdesc->txd;

err_unlock:
	/*
	 * Keep sparse happy by restoring an even lock count on
	 * this lock.
	 */
	__acquire(plxdev->ring_lock);

	spin_unlock_bh(&plxdev->ring_lock);
	return NULL;
}

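/*
 * Assign a cookie and hand the descriptor to the hardware by setting
 * its valid bit, then drop the ring_lock taken in prep_memcpy.
 */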
static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
	__releases(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
	struct plx_dma_desc *plxdesc = to_plx_desc(desc);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(desc);

	/*
	 * Ensure the descriptor updates are visible to the dma device
	 * before setting the valid bit.
	 */
	wmb();

	plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);

	spin_unlock_bh(&plxdev->ring_lock);

	return cookie;
}

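/*
 * Process any pending completions before the second cookie check so
 * the status is accurate even if the interrupt has not run yet.
 */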
static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	plx_dma_process_desc(plxdev);

	return dma_cookie_status(chan, cookie, txstate);
}

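/*
 * Start the engine so it picks up any descriptors marked valid since
 * it last went idle.
 */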
static void plx_dma_issue_pending(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Ensure the valid bits are visible before starting the
	 * DMA engine.
	 */
	wmb();

	writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);

	rcu_read_unlock();
}

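/*
 * Schedule completion processing for finished descriptors and ack the
 * interrupt by writing the status bits back to the status register.
 */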
static irqreturn_t plx_dma_isr(int irq, void *devid)
{
	struct plx_dma_dev *plxdev = devid;
	u32 status;

	status = readw(plxdev->bar + PLX_REG_INTR_STATUS);

	if (!status)
		return IRQ_NONE;

	if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
		tasklet_schedule(&plxdev->desc_task);

	writew(status, plxdev->bar + PLX_REG_INTR_STATUS);

	return IRQ_HANDLED;
}

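/*
 * Allocate the shadow ring of software descriptors, each initialized
 * to point at its corresponding hardware descriptor in hw_ring.
 */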
static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
{
	struct plx_dma_desc *desc;
	int i;

	plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
				    sizeof(*plxdev->desc_ring), GFP_KERNEL);
	if (!plxdev->desc_ring)
		return -ENOMEM;

	for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			goto free_and_exit;

		dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
		desc->txd.tx_submit = plx_dma_tx_submit;
		desc->hw = &plxdev->hw_ring[i];

		plxdev->desc_ring[i] = desc;
	}

	return 0;

free_and_exit:
	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);
	kfree(plxdev->desc_ring);
	return -ENOMEM;
}

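/*
 * Allocate the coherent hardware ring plus its shadow descriptors,
 * reset the engine and point it at the ring. Returns the number of
 * descriptors in the ring on success.
 */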
static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	int rc;

	plxdev->head = plxdev->tail = 0;
	plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
					     &plxdev->hw_ring_dma, GFP_KERNEL);
	if (!plxdev->hw_ring)
		return -ENOMEM;

	rc = plx_dma_alloc_desc(plxdev);
	if (rc)
		goto out_free_hw_ring;

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		rc = -ENODEV;
		goto out_free_hw_ring;
	}

	writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
	writel(lower_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(upper_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(lower_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
	writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);

	plxdev->ring_active = true;

	rcu_read_unlock();

	return PLX_DMA_RING_COUNT;

out_free_hw_ring:
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
	return rc;
}

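/*
 * Tear down in order: deactivate the ring, pause the hardware, wait
 * out the interrupt handler and tasklet, abort whatever is still
 * queued, then free the descriptors and the hardware ring.
 */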
static void plx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	struct pci_dev *pdev;
	int irq = -1;
	int i;

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	plx_dma_stop(plxdev);

	rcu_read_lock();
	pdev = rcu_dereference(plxdev->pdev);
	if (pdev)
		irq = pci_irq_vector(pdev, 0);
	rcu_read_unlock();

	if (irq > 0)
		synchronize_irq(irq);

	tasklet_kill(&plxdev->desc_task);

	plx_dma_abort_desc(plxdev);

	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);

	kfree(plxdev->desc_ring);
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
}

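/* Called by the dmaengine core once the last reference to the device drops */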
static void plx_dma_release(struct dma_device *dma_dev)
{
	struct plx_dma_dev *plxdev =
		container_of(dma_dev, struct plx_dma_dev, dma_dev);

	put_device(dma_dev->dev);
	kfree(plxdev);
}

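/*
 * Set up a dma_device with a single memcpy-capable channel and
 * register it. The reference taken on the PCI device here is dropped
 * in plx_dma_release().
 */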
static int plx_dma_create(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev;
	struct dma_device *dma;
	struct dma_chan *chan;
	int rc;

	plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
	if (!plxdev)
		return -ENOMEM;

	rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
			 KBUILD_MODNAME, plxdev);
	if (rc)
		goto free_plx;

	spin_lock_init(&plxdev->ring_lock);
	tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);

	RCU_INIT_POINTER(plxdev->pdev, pdev);
	plxdev->bar = pcim_iomap_table(pdev)[0];

	dma = &plxdev->dma_dev;
	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma->dev = get_device(&pdev->dev);

	dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
	dma->device_free_chan_resources = plx_dma_free_chan_resources;
	dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
	dma->device_issue_pending = plx_dma_issue_pending;
	dma->device_tx_status = plx_dma_tx_status;
	dma->device_release = plx_dma_release;

	chan = &plxdev->dma_chan;
	chan->device = dma;
	dma_cookie_init(chan);
	list_add_tail(&chan->device_node, &dma->channels);

	rc = dma_async_device_register(dma);
	if (rc) {
		pci_err(pdev, "Failed to register dma device: %d\n", rc);
		goto put_device;
	}

	pci_set_drvdata(pdev, plxdev);

	return 0;

put_device:
	put_device(&pdev->dev);
	free_irq(pci_irq_vector(pdev, 0), plxdev);
free_plx:
	kfree(plxdev);

	return rc;
}

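/*
 * Descriptor addresses are 32 + 16 bits wide, hence the 48-bit DMA
 * mask, with a 32-bit fallback if that cannot be satisfied.
 */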
static int plx_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (rc <= 0)
		return rc;

	pci_set_master(pdev);

	rc = plx_dma_create(pdev);
	if (rc)
		goto err_free_irq_vectors;

	pci_info(pdev, "PLX DMA Channel Registered\n");

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}

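/*
 * Unbind: clearing the RCU pdev pointer first keeps other paths off
 * the hardware, then quiesce the engine while the BAR mapping is
 * still valid before unregistering from the dmaengine core.
 */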
static void plx_dma_remove(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);

	free_irq(pci_irq_vector(pdev, 0), plxdev);

	rcu_assign_pointer(plxdev->pdev, NULL);
	synchronize_rcu();

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	__plx_dma_stop(plxdev);
	plx_dma_abort_desc(plxdev);

	plxdev->bar = NULL;
	dma_async_device_unregister(&plxdev->dma_dev);

	pci_free_irq_vectors(pdev);
}

static const struct pci_device_id plx_dma_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_PLX,
		.device		= 0x87D0,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= PCI_CLASS_SYSTEM_OTHER << 8,
		.class_mask	= 0xFFFFFFFF,
	},
	{0}
};
MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);

static struct pci_driver plx_dma_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= plx_dma_pci_tbl,
	.probe		= plx_dma_probe,
	.remove		= plx_dma_remove,
};
module_pci_driver(plx_dma_pci_driver);