// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2019 NXP
 *
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dim.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
	struct device *dev;

	/* Net DIM */
	struct dim rx_dim;
	/* protect against concurrent Net DIM updates */
	spinlock_t dim_lock;
	u16 event_ctr;
	u64 bytes;
	u64 frames;
};

struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;	/* unaligned value from kmalloc() */
	unsigned int idx;	/* position of the next-to-be-returned entry */
	struct qbman_swp *swp;	/* portal used to issue VDQCR */
	struct device *dev;	/* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;
	/*
	 * If cpu == -1, choose the current cpu, with no guarantee that the
	 * caller won't be migrated to another cpu afterwards.
	 */
	if (cpu < 0)
		cpu = raw_smp_processor_id();

	/* If a specific cpu was requested, pick it up immediately */
	return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	d = service_select_by_cpu(d, -1);
	if (d)
		return d;

	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
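
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): pick
 * the software portal affine to the current CPU, falling back to any
 * available service if this CPU has no dedicated DPIO.
 *
 *	struct dpaa2_io *io;
 *
 *	io = dpaa2_io_service_select(raw_smp_processor_id());
 *	if (!io)
 *		io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
 */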

static void dpaa2_io_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);

	dpaa2_io_set_irq_coalescing(d, moder.usec);
	dim->state = DIM_START_MEASURE;
}

/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
				 struct device *dev)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	u32 qman_256_cycles_per_ns;

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
	/* Compute how many ns fit into one block of 256 QBMAN clock cycles.
	 * This is needed because the interrupt timeout period register must be
	 * specified in QBMAN clock cycles, in increments of 256.
	 */
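	/*
	 * Worked example (illustrative): for a 500 MHz QBMAN clock,
	 * 256000 / (500000000 / 1000000) = 256000 / 500 = 512, i.e. one
	 * block of 256 cycles spans 512 ns.
	 */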
	qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
	obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
	obj->swp = qbman_swp_init(&obj->swp_desc);

	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	spin_lock_init(&obj->dim_lock);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	obj->dev = dev;

	memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
	INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
	obj->event_ctr = 0;
	obj->bytes = 0;
	obj->frames = 0;

	return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
	spin_lock(&dpio_list_lock);
	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
	list_del(&d->node);
	spin_unlock(&dpio_list_lock);

	kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	obj->event_ctr++;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}

/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 *
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
	return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 * notifications on the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully. In that way:
 * (a) The DPIO service is "ready" to handle a notification arrival
 *     (which might happen before the "attach" command to MC has
 *     returned control of execution back to the caller)
 * (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *     'qman64' parameters that it should pass along in the MC command
 *     in order for the object to be configured to produce the right
 *     notification fields to the DPIO service.
 *
 * Return 0 for success, or -ENODEV for failure.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx,
			      struct device *dev)
{
	struct device_link *link;
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
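
/*
 * A minimal registration sketch (hypothetical consumer; my_fqdan_cb and fqid
 * are assumed names, error handling elided). The context is registered first
 * so that a notification arriving right after the MC "attach" command can
 * already be dispatched:
 *
 *	ctx->is_cdan = 0;
 *	ctx->cb = my_fqdan_cb;
 *	ctx->id = fqid;
 *	ctx->desired_cpu = DPAA2_IO_ANY_CPU;
 *	err = dpaa2_io_service_register(NULL, ctx, dev);
 *	if (err)
 *		return err;
 *	// now pass ctx->dpio_id and ctx->qman64 in the MC command that
 *	// attaches the FQ to the DPIO
 */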

/**
 * dpaa2_io_service_deregister() - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *dev)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed", i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is what
 * this function achieves.
 *
 * Return 0 for success.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (unlikely(!d))
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
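
/*
 * Sketch of the typical notification cycle (hypothetical caller): after a
 * FQDAN/CDAN callback fires, the source stays silent while the driver drains
 * it with pull dequeues; once drained, it rearms the source to get the next
 * notification.
 *
 *	// at the end of the driver's poll routine:
 *	err = dpaa2_io_service_rearm(NULL, ctx);
 */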

/**
 * dpaa2_io_service_pull_fq() - pull dequeue frames from a fq.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
			     struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_fq(&pd, fqid);

	d = service_select(d);
	if (!d)
		return -ENODEV;
	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);

/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
				u32 fqid,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
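
/*
 * Sketch (hypothetical caller; DPAA2_ENQUEUE_RETRIES is an assumed constant):
 * -EBUSY only means the enqueue ring had no free slot at that instant, so
 * callers commonly retry a bounded number of times.
 *
 *	int i, err;
 *
 *	for (i = 0; i < DPAA2_ENQUEUE_RETRIES; i++) {
 *		err = dpaa2_io_service_enqueue_fq(NULL, fqid, fd);
 *		if (err != -EBUSY)
 *			break;
 *	}
 */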

/**
 * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
 * to a frame queue using one fqid.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
					 u32 fqid,
					 const struct dpaa2_fd *fd,
					 int nb)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);

/**
 * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queues using a list of fqids.
 * @d: the given DPIO service.
 * @fqid: the given list of frame queue ids.
 * @fd: the frame descriptor which is enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
					      u32 *fqid,
					      const struct dpaa2_fd *fd,
					      int nb)
{
	struct qbman_eq_desc *ed;
	int i, ret;

	ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
	if (!ed)
		return -ENOMEM;

	d = service_select(d);
	if (!d) {
		ret = -ENODEV;
		goto out;
	}

	for (i = 0; i < nb; i++) {
		qbman_eq_desc_clear(&ed[i]);
		qbman_eq_desc_set_no_orp(&ed[i], 0);
		qbman_eq_desc_set_fq(&ed[i], fqid[i]);
	}

	ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
out:
	kfree(ed);
	return ret;
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u16 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u16 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
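
/*
 * Sketch (hypothetical caller): because the return value is a count rather
 * than a plain success/failure code, a refill loop must treat short results
 * as normal.
 *
 *	u64 bufs[7];
 *	int n;
 *
 *	n = dpaa2_io_service_acquire(NULL, bpid, bufs, ARRAY_SIZE(bufs));
 *	if (n < 0)
 *		return n;	// the management command itself failed
 *	// only bufs[0..n-1] are valid; n == 0 means the pool was empty
 */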

/*
 * 'Stores' are reusable memory blocks for holding dequeue results, and to
 * assist with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
 * @max_frames: the maximum number of dequeue results for frames, must be <= 32.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 32))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 * result.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If we get an empty dequeue result to terminate a zero-results
		 * vdqcr, return NULL to the caller rather than expecting the
		 * caller to check non-NULL results every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		prefetch(&s->vaddr[s->idx]);
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
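
/*
 * End-to-end dequeue sketch (hypothetical caller; process_fd() is an assumed
 * helper, error handling elided): create a store, issue a pull dequeue into
 * it, then walk the results until is_last is set.
 *
 *	struct dpaa2_io_store *store;
 *	struct dpaa2_dq *dq;
 *	int is_last = 0;
 *
 *	store = dpaa2_io_store_create(16, dev);
 *	err = dpaa2_io_service_pull_fq(NULL, fqid, store);
 *
 *	while (!is_last) {
 *		dq = dpaa2_io_store_next(store, &is_last);
 *		if (!dq)
 *			continue;	// hardware has not produced it yet
 *		process_fd(dpaa2_dq_fd(dq));
 *	}
 *	dpaa2_io_store_destroy(store);
 */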

/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame- and byte-count are hereby returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
			    u32 *fcnt, u32 *bcnt)
{
	struct qbman_fq_query_np_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_fq_query_state(swp, fqid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*fcnt = qbman_fq_state_frame_count(&state);
	*bcnt = qbman_fq_state_byte_count(&state);

	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);

/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 * buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
	struct qbman_bp_query_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_bp_query(swp, bpid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*num = qbman_bp_info_num_free_bufs(&state);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);

/**
 * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
 * @d: the given DPIO object
 * @irq_holdoff: interrupt holdoff (timeout) period in us
 *
 * Return 0 for success, or negative error code on error.
 */
int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
{
	struct qbman_swp *swp = d->swp;

	return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
					    irq_holdoff);
}
EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);

/**
 * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
 * @d: the given DPIO object
 * @irq_holdoff: interrupt holdoff (timeout) period in us
 */
void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
{
	struct qbman_swp *swp = d->swp;

	qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
}
EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);

/**
 * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
 * @d: the given DPIO object
 * @use_adaptive_rx_coalesce: adaptive coalescing state
 */
void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
				      int use_adaptive_rx_coalesce)
{
	d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
}
EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);

/**
 * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
 * @d: the given DPIO object
 *
 * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
 * otherwise.
 */
int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
{
	return d->swp->use_adaptive_rx_coalesce;
}
EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);

/**
 * dpaa2_io_update_net_dim() - Update Net DIM
 * @d: the given DPIO object
 * @frames: how many frames have been dequeued by the user since the last call
 * @bytes: how many bytes have been dequeued by the user since the last call
 */
void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
{
	struct dim_sample dim_sample = {};

	if (!d->swp->use_adaptive_rx_coalesce)
		return;

	spin_lock(&d->dim_lock);

	d->bytes += bytes;
	d->frames += frames;

	dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
	net_dim(&d->rx_dim, dim_sample);

	spin_unlock(&d->dim_lock);
}
EXPORT_SYMBOL(dpaa2_io_update_net_dim);

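/*
 * Sketch (hypothetical caller; ch->stats is an assumed counter struct): a
 * driver's poll loop feeds its dequeue totals back so net_dim can pick the
 * next moderation profile and schedule dpaa2_io_dim_work() as needed.
 *
 *	// at the end of the NAPI poll:
 *	dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames, ch->stats.bytes);
 */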