// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>

#include "dma.h"

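/*
 * Acquire the "tx" and "rx" DMA channels and allocate the shared result
 * buffer; the ignore buffer lives in the same allocation, right after the
 * QCE_RESULT_BUF_SZ result area.
 */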
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
	int ret;

	dma->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->txchan))
		return PTR_ERR(dma->txchan);

	dma->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->rxchan)) {
		ret = PTR_ERR(dma->rxchan);
		goto error_rx;
	}

	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
				  GFP_KERNEL);
	if (!dma->result_buf) {
		ret = -ENOMEM;
		goto error_nomem;
	}

	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

	return 0;
error_nomem:
	dma_release_channel(dma->rxchan);
error_rx:
	dma_release_channel(dma->txchan);
	return ret;
}

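/* Undo qce_dma_request(): release both channels and free the result buffer. */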
void qce_dma_release(struct qce_dma_data *dma)
{
	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}

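/*
 * Fill the unused entries of sgt (those without a page set) from new_sgl,
 * mapping at most max_len bytes in total. Returns the last entry written,
 * or ERR_PTR(-EINVAL) when the table has no free entries left.
 */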
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
		unsigned int max_len)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
	unsigned int new_len;

	while (sg) {
		if (!sg_page(sg))
			break;
		sg = sg_next(sg);
	}

	if (!sg)
		return ERR_PTR(-EINVAL);

	while (new_sgl && sg && max_len) {
		new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
		sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
		max_len -= new_len;
	}

	return sg_last;
}

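/*
 * Prepare a slave-sg descriptor on the given channel, attach the completion
 * callback and submit it to the dmaengine queue.
 */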
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
			   int nents, unsigned long flags,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!sg || !nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

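/*
 * Queue both halves of a transfer: the rx channel pushes rx_sg towards the
 * device (DMA_MEM_TO_DEV) and the tx channel pulls tx_sg back to memory
 * (DMA_DEV_TO_MEM). The completion callback is attached to the tx side only.
 */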
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
		     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_chan *rxchan = dma->rxchan;
	struct dma_chan *txchan = dma->txchan;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	int ret;

	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
			      NULL, NULL);
	if (ret)
		return ret;

	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
			       cb, cb_param);
}

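/* Start execution of the descriptors queued on both channels. */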
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
	dma_async_issue_pending(dma->rxchan);
	dma_async_issue_pending(dma->txchan);
}

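/*
 * Abort all outstanding transfers on both channels; the rx error, if any,
 * takes precedence over the tx result.
 */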
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
	int ret;

	ret = dmaengine_terminate_all(dma->rxchan);
	return ret ?: dmaengine_terminate_all(dma->txchan);
}