1 | // SPDX-License-Identifier: BSD-3-Clause-Clear |
2 | /* |
3 | * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. |
4 | * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. |
5 | */ |
6 | |
7 | #include "core.h" |
8 | #include "debug.h" |
9 | |
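/* Post a single buffer to the direct-buffer ring: DMA-map the aligned
 * payload, track the element in the buffer IDR, and write its address
 * and cookie into the next free source ring descriptor. Must be called
 * with srng->lock held.
 */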
10 | static int ath12k_dbring_bufs_replenish(struct ath12k *ar, |
11 | struct ath12k_dbring *ring, |
12 | struct ath12k_dbring_element *buff, |
13 | gfp_t gfp) |
14 | { |
15 | struct ath12k_base *ab = ar->ab; |
16 | struct hal_srng *srng; |
17 | dma_addr_t paddr; |
18 | void *ptr_aligned, *ptr_unaligned, *desc; |
19 | int ret; |
20 | int buf_id; |
21 | u32 cookie; |
22 | |
23 | srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; |
24 | |
25 | lockdep_assert_held(&srng->lock); |
26 | |
27 | ath12k_hal_srng_access_begin(ab, srng); |
28 | |
29 | ptr_unaligned = buff->payload; |
30 | ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align); |
31 | paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz, |
32 | DMA_FROM_DEVICE); |
33 | |
	ret = dma_mapping_error(ab->dev, paddr);
35 | if (ret) |
36 | goto err; |
37 | |
	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
	spin_unlock_bh(&ring->idr_lock);
41 | if (buf_id < 0) { |
42 | ret = -ENOBUFS; |
43 | goto err_dma_unmap; |
44 | } |
45 | |
46 | desc = ath12k_hal_srng_src_get_next_entry(ab, srng); |
47 | if (!desc) { |
48 | ret = -ENOENT; |
49 | goto err_idr_remove; |
50 | } |
51 | |
52 | buff->paddr = paddr; |
53 | |
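	/* The cookie carries the pdev index and the IDR buffer id so the
	 * element can be looked up again when firmware releases it.
	 */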
	cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
		 u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
56 | |
	ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
58 | |
59 | ath12k_hal_srng_access_end(ab, srng); |
60 | |
61 | return 0; |
62 | |
63 | err_idr_remove: |
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
67 | err_dma_unmap: |
68 | dma_unmap_single(ab->dev, paddr, ring->buf_sz, |
69 | DMA_FROM_DEVICE); |
70 | err: |
71 | ath12k_hal_srng_access_end(ab, srng); |
72 | return ret; |
73 | } |
74 | |
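/* Fill the refill ring with as many buffers as there are free source
 * ring entries. Returns the number of entries that could not be
 * replenished, i.e. 0 on complete success.
 */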
75 | static int ath12k_dbring_fill_bufs(struct ath12k *ar, |
76 | struct ath12k_dbring *ring, |
77 | gfp_t gfp) |
78 | { |
79 | struct ath12k_dbring_element *buff; |
80 | struct hal_srng *srng; |
81 | struct ath12k_base *ab = ar->ab; |
82 | int num_remain, req_entries, num_free; |
83 | u32 align; |
84 | int size, ret; |
85 | |
86 | srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; |
87 | |
	spin_lock_bh(&srng->lock);
89 | |
	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
91 | req_entries = min(num_free, ring->bufs_max); |
92 | num_remain = req_entries; |
93 | align = ring->buf_align; |
94 | size = sizeof(*buff) + ring->buf_sz + align - 1; |
95 | |
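	/* Each element is over-allocated by (align - 1) bytes so the
	 * payload can be aligned with PTR_ALIGN() before DMA mapping;
	 * buffers are then posted until the ring is full or an
	 * allocation fails.
	 */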
96 | while (num_remain > 0) { |
		buff = kzalloc(size, gfp);
98 | if (!buff) |
99 | break; |
100 | |
101 | ret = ath12k_dbring_bufs_replenish(ar, ring, buff, gfp); |
102 | if (ret) { |
103 | ath12k_warn(ab, fmt: "failed to replenish db ring num_remain %d req_ent %d\n" , |
104 | num_remain, req_entries); |
105 | kfree(objp: buff); |
106 | break; |
107 | } |
108 | num_remain--; |
109 | } |
110 | |
	spin_unlock_bh(&srng->lock);
112 | |
113 | return num_remain; |
114 | } |
115 | |
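/* Advertise the direct-buffer ring to firmware via WMI: base address,
 * head/tail pointer addresses, element count and size, and the event
 * moderation parameters.
 */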
116 | int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar, |
117 | struct ath12k_dbring *ring, |
118 | enum wmi_direct_buffer_module id) |
119 | { |
120 | struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0}; |
121 | int ret; |
122 | |
123 | if (id >= WMI_DIRECT_BUF_MAX) |
124 | return -EINVAL; |
125 | |
126 | arg.pdev_id = DP_SW2HW_MACID(ring->pdev_id); |
127 | arg.module_id = id; |
128 | arg.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr); |
129 | arg.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr); |
130 | arg.head_idx_paddr_lo = lower_32_bits(ring->hp_addr); |
131 | arg.head_idx_paddr_hi = upper_32_bits(ring->hp_addr); |
132 | arg.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr); |
133 | arg.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr); |
134 | arg.num_elems = ring->bufs_max; |
135 | arg.buf_size = ring->buf_sz; |
136 | arg.num_resp_per_event = ring->num_resp_per_event; |
137 | arg.event_timeout_ms = ring->event_timeout_ms; |
138 | |
	ret = ath12k_wmi_pdev_dma_ring_cfg(ar, &arg);
140 | if (ret) { |
		ath12k_warn(ar->ab, "failed to setup db ring cfg\n");
142 | return ret; |
143 | } |
144 | |
145 | return 0; |
146 | } |
147 | |
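/* Register the per-module buffer-release handler and the event
 * moderation parameters that are later sent to firmware in the ring
 * configuration.
 */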
148 | int ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring, |
149 | u32 num_resp_per_event, u32 event_timeout_ms, |
150 | int (*handler)(struct ath12k *, |
151 | struct ath12k_dbring_data *)) |
152 | { |
153 | if (WARN_ON(!ring)) |
154 | return -EINVAL; |
155 | |
156 | ring->num_resp_per_event = num_resp_per_event; |
157 | ring->event_timeout_ms = event_timeout_ms; |
158 | ring->handler = handler; |
159 | |
160 | return 0; |
161 | } |
162 | |
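/* Derive the ring geometry from the SRNG size and the capabilities
 * reported by firmware, then fill the ring with DMA buffers.
 */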
163 | int ath12k_dbring_buf_setup(struct ath12k *ar, |
164 | struct ath12k_dbring *ring, |
165 | struct ath12k_dbring_cap *db_cap) |
166 | { |
167 | struct ath12k_base *ab = ar->ab; |
168 | struct hal_srng *srng; |
169 | int ret; |
170 | |
171 | srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; |
172 | ring->bufs_max = ring->refill_srng.size / |
			 ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
174 | |
175 | ring->buf_sz = db_cap->min_buf_sz; |
176 | ring->buf_align = db_cap->min_buf_align; |
177 | ring->pdev_id = db_cap->pdev_id; |
178 | ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng); |
179 | ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng); |
180 | |
181 | ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL); |
182 | |
183 | return ret; |
184 | } |
185 | |
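/* Allocate the HAL_RXDMA_DIR_BUF source ring that backs this
 * direct-buffer ring.
 */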
186 | int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring, |
187 | int ring_num, int num_entries) |
188 | { |
189 | int ret; |
190 | |
	ret = ath12k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
193 | if (ret < 0) { |
194 | ath12k_warn(ab: ar->ab, fmt: "failed to setup srng: %d ring_id %d\n" , |
195 | ret, ring_num); |
196 | goto err; |
197 | } |
198 | |
199 | return 0; |
200 | err: |
	ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
202 | return ret; |
203 | } |
204 | |
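/* Look up the direct-buffer capabilities (buffer size and alignment)
 * that firmware reported for the given pdev and module id.
 */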
205 | int ath12k_dbring_get_cap(struct ath12k_base *ab, |
206 | u8 pdev_idx, |
207 | enum wmi_direct_buffer_module id, |
208 | struct ath12k_dbring_cap *db_cap) |
209 | { |
210 | int i; |
211 | |
212 | if (!ab->num_db_cap || !ab->db_caps) |
213 | return -ENOENT; |
214 | |
215 | if (id >= WMI_DIRECT_BUF_MAX) |
216 | return -EINVAL; |
217 | |
218 | for (i = 0; i < ab->num_db_cap; i++) { |
219 | if (pdev_idx == ab->db_caps[i].pdev_id && |
220 | id == ab->db_caps[i].id) { |
221 | *db_cap = ab->db_caps[i]; |
222 | |
223 | return 0; |
224 | } |
225 | } |
226 | |
227 | return -ENOENT; |
228 | } |
229 | |
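/* Handle a WMI DMA buffer release event: for each released entry, look
 * the buffer up by its cookie, unmap it, pass the payload to the
 * module's handler, then replenish the buffer back to the ring.
 */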
230 | int ath12k_dbring_buffer_release_event(struct ath12k_base *ab, |
231 | struct ath12k_dbring_buf_release_event *ev) |
232 | { |
233 | struct ath12k_dbring *ring = NULL; |
234 | struct hal_srng *srng; |
235 | struct ath12k *ar; |
236 | struct ath12k_dbring_element *buff; |
237 | struct ath12k_dbring_data handler_data; |
238 | struct ath12k_buffer_addr desc; |
239 | u8 *vaddr_unalign; |
240 | u32 num_entry, num_buff_reaped; |
241 | u8 pdev_idx, rbm; |
242 | u32 cookie; |
243 | int buf_id; |
244 | int size; |
245 | dma_addr_t paddr; |
246 | int ret = 0; |
247 | |
248 | pdev_idx = le32_to_cpu(ev->fixed.pdev_id); |
249 | |
250 | if (pdev_idx >= ab->num_radios) { |
251 | ath12k_warn(ab, fmt: "Invalid pdev id %d\n" , pdev_idx); |
252 | return -EINVAL; |
253 | } |
254 | |
255 | if (ev->fixed.num_buf_release_entry != |
256 | ev->fixed.num_meta_data_entry) { |
257 | ath12k_warn(ab, fmt: "Buffer entry %d mismatch meta entry %d\n" , |
258 | ev->fixed.num_buf_release_entry, |
259 | ev->fixed.num_meta_data_entry); |
260 | return -EINVAL; |
261 | } |
262 | |
263 | ar = ab->pdevs[pdev_idx].ar; |
264 | |
265 | rcu_read_lock(); |
266 | if (!rcu_dereference(ab->pdevs_active[pdev_idx])) { |
267 | ret = -EINVAL; |
268 | goto rcu_unlock; |
269 | } |
270 | |
271 | switch (ev->fixed.module_id) { |
272 | case WMI_DIRECT_BUF_SPECTRAL: |
273 | break; |
274 | default: |
275 | ring = NULL; |
276 | ath12k_warn(ab, fmt: "Recv dma buffer release ev on unsupp module %d\n" , |
277 | ev->fixed.module_id); |
278 | break; |
279 | } |
280 | |
281 | if (!ring) { |
282 | ret = -EINVAL; |
283 | goto rcu_unlock; |
284 | } |
285 | |
286 | srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; |
287 | num_entry = le32_to_cpu(ev->fixed.num_buf_release_entry); |
288 | size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1; |
289 | num_buff_reaped = 0; |
290 | |
	spin_lock_bh(&srng->lock);
292 | |
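	/* Each release entry holds a buffer address descriptor; recover
	 * the IDR buffer id from its cookie field.
	 */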
293 | while (num_buff_reaped < num_entry) { |
294 | desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo; |
295 | desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi; |
296 | handler_data.meta = ev->meta_data[num_buff_reaped]; |
297 | |
298 | num_buff_reaped++; |
299 | |
		ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
301 | |
		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
303 | |
		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);
312 | |
313 | dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz, |
314 | DMA_FROM_DEVICE); |
315 | |
316 | if (ring->handler) { |
317 | vaddr_unalign = buff->payload; |
318 | handler_data.data = PTR_ALIGN(vaddr_unalign, |
319 | ring->buf_align); |
320 | handler_data.data_sz = ring->buf_sz; |
321 | |
322 | ring->handler(ar, &handler_data); |
323 | } |
324 | |
325 | memset(buff, 0, size); |
326 | ath12k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC); |
327 | } |
328 | |
	spin_unlock_bh(&srng->lock);
330 | |
331 | rcu_unlock: |
332 | rcu_read_unlock(); |
333 | |
334 | return ret; |
335 | } |
336 | |
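/* Free the SRNG that backs the direct-buffer ring. */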
337 | void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring) |
338 | { |
	ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
340 | } |
341 | |
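/* Unmap and free every buffer still tracked in the IDR, then destroy
 * the IDR itself.
 */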
342 | void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring) |
343 | { |
344 | struct ath12k_dbring_element *buff; |
345 | int buf_id; |
346 | |
	spin_lock_bh(&ring->idr_lock);
348 | idr_for_each_entry(&ring->bufs_idr, buff, buf_id) { |
		idr_remove(&ring->bufs_idr, buf_id);
350 | dma_unmap_single(ar->ab->dev, buff->paddr, |
351 | ring->buf_sz, DMA_FROM_DEVICE); |
		kfree(buff);
353 | } |
354 | |
355 | idr_destroy(&ring->bufs_idr); |
356 | spin_unlock_bh(lock: &ring->idr_lock); |
357 | } |
358 | |