1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) |
2 | /* |
3 | * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. |
4 | * Copyright 2016-2019 NXP |
5 | * |
6 | */ |
7 | |
8 | #include <asm/cacheflush.h> |
9 | #include <linux/io.h> |
10 | #include <linux/slab.h> |
11 | #include <linux/spinlock.h> |
12 | #include <soc/fsl/dpaa2-global.h> |
13 | |
14 | #include "qbman-portal.h" |
15 | |
16 | /* All QBMan command and result structures use this "valid bit" encoding */ |
17 | #define QB_VALID_BIT ((u32)0x80) |
18 | |
19 | /* QBMan portal management command codes */ |
20 | #define QBMAN_MC_ACQUIRE 0x30 |
21 | #define QBMAN_WQCHAN_CONFIGURE 0x46 |
22 | |
23 | /* CINH register offsets */ |
24 | #define QBMAN_CINH_SWP_EQCR_PI 0x800 |
25 | #define QBMAN_CINH_SWP_EQCR_CI 0x840 |
26 | #define QBMAN_CINH_SWP_EQAR 0x8c0 |
27 | #define QBMAN_CINH_SWP_CR_RT 0x900 |
28 | #define QBMAN_CINH_SWP_VDQCR_RT 0x940 |
29 | #define QBMAN_CINH_SWP_EQCR_AM_RT 0x980 |
30 | #define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0 |
31 | #define QBMAN_CINH_SWP_DQPI 0xa00 |
32 | #define QBMAN_CINH_SWP_DQRR_ITR 0xa80 |
33 | #define QBMAN_CINH_SWP_DCAP 0xac0 |
34 | #define QBMAN_CINH_SWP_SDQCR 0xb00 |
35 | #define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40 |
36 | #define QBMAN_CINH_SWP_RCR_PI 0xc00 |
37 | #define QBMAN_CINH_SWP_RAR 0xcc0 |
38 | #define QBMAN_CINH_SWP_ISR 0xe00 |
39 | #define QBMAN_CINH_SWP_IER 0xe40 |
40 | #define QBMAN_CINH_SWP_ISDR 0xe80 |
41 | #define QBMAN_CINH_SWP_IIR 0xec0 |
42 | #define QBMAN_CINH_SWP_ITPR 0xf40 |
43 | |
44 | /* CENA register offsets */ |
45 | #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6)) |
46 | #define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6)) |
47 | #define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6)) |
48 | #define QBMAN_CENA_SWP_CR 0x600 |
49 | #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1)) |
50 | #define QBMAN_CENA_SWP_VDQCR 0x780 |
51 | #define QBMAN_CENA_SWP_EQCR_CI 0x840 |
52 | #define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840 |
53 | |
54 | /* CENA register offsets in memory-backed mode */ |
55 | #define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6)) |
56 | #define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6)) |
57 | #define QBMAN_CENA_SWP_CR_MEM 0x1600 |
58 | #define QBMAN_CENA_SWP_RR_MEM 0x1680 |
59 | #define QBMAN_CENA_SWP_VDQCR_MEM 0x1780 |
60 | |
61 | /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ |
62 | #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6) |
63 | |
64 | /* Define token used to determine if response written to memory is valid */ |
65 | #define QMAN_DQ_TOKEN_VALID 1 |
66 | |
67 | /* SDQCR attribute codes */ |
68 | #define QB_SDQCR_FC_SHIFT 29 |
69 | #define QB_SDQCR_FC_MASK 0x1 |
70 | #define QB_SDQCR_DCT_SHIFT 24 |
71 | #define QB_SDQCR_DCT_MASK 0x3 |
72 | #define QB_SDQCR_TOK_SHIFT 16 |
73 | #define QB_SDQCR_TOK_MASK 0xff |
74 | #define QB_SDQCR_SRC_SHIFT 0 |
75 | #define QB_SDQCR_SRC_MASK 0xffff |
76 | |
77 | /* opaque token for static dequeues */ |
78 | #define QMAN_SDQCR_TOKEN 0xbb |
79 | |
80 | #define QBMAN_EQCR_DCA_IDXMASK 0x0f |
81 | #define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31) |
82 | |
83 | #define EQ_DESC_SIZE_WITHOUT_FD 29 |
84 | #define EQ_DESC_SIZE_FD_START 32 |
85 | |
86 | enum qbman_sdqcr_dct { |
87 | qbman_sdqcr_dct_null = 0, |
88 | qbman_sdqcr_dct_prio_ics, |
89 | qbman_sdqcr_dct_active_ics, |
90 | qbman_sdqcr_dct_active |
91 | }; |
92 | |
93 | enum qbman_sdqcr_fc { |
94 | qbman_sdqcr_fc_one = 0, |
95 | qbman_sdqcr_fc_up_to_3 = 1 |
96 | }; |
97 | |
/* Internal function declarations */
99 | static int qbman_swp_enqueue_direct(struct qbman_swp *s, |
100 | const struct qbman_eq_desc *d, |
101 | const struct dpaa2_fd *fd); |
102 | static int qbman_swp_enqueue_mem_back(struct qbman_swp *s, |
103 | const struct qbman_eq_desc *d, |
104 | const struct dpaa2_fd *fd); |
105 | static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, |
106 | const struct qbman_eq_desc *d, |
107 | const struct dpaa2_fd *fd, |
108 | uint32_t *flags, |
109 | int num_frames); |
110 | static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s, |
111 | const struct qbman_eq_desc *d, |
112 | const struct dpaa2_fd *fd, |
113 | uint32_t *flags, |
114 | int num_frames); |
115 | static int |
116 | qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, |
117 | const struct qbman_eq_desc *d, |
118 | const struct dpaa2_fd *fd, |
119 | int num_frames); |
120 | static |
121 | int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s, |
122 | const struct qbman_eq_desc *d, |
123 | const struct dpaa2_fd *fd, |
124 | int num_frames); |
125 | static int qbman_swp_pull_direct(struct qbman_swp *s, |
126 | struct qbman_pull_desc *d); |
127 | static int qbman_swp_pull_mem_back(struct qbman_swp *s, |
128 | struct qbman_pull_desc *d); |
129 | |
130 | const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s); |
131 | const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s); |
132 | |
133 | static int qbman_swp_release_direct(struct qbman_swp *s, |
134 | const struct qbman_release_desc *d, |
135 | const u64 *buffers, |
136 | unsigned int num_buffers); |
137 | static int qbman_swp_release_mem_back(struct qbman_swp *s, |
138 | const struct qbman_release_desc *d, |
139 | const u64 *buffers, |
140 | unsigned int num_buffers); |
141 | |
142 | /* Function pointers */ |
143 | int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s, |
144 | const struct qbman_eq_desc *d, |
145 | const struct dpaa2_fd *fd) |
146 | = qbman_swp_enqueue_direct; |
147 | |
148 | int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s, |
149 | const struct qbman_eq_desc *d, |
150 | const struct dpaa2_fd *fd, |
151 | uint32_t *flags, |
152 | int num_frames) |
153 | = qbman_swp_enqueue_multiple_direct; |
154 | |
155 | int |
156 | (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s, |
157 | const struct qbman_eq_desc *d, |
158 | const struct dpaa2_fd *fd, |
159 | int num_frames) |
160 | = qbman_swp_enqueue_multiple_desc_direct; |
161 | |
162 | int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d) |
163 | = qbman_swp_pull_direct; |
164 | |
165 | const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s) |
166 | = qbman_swp_dqrr_next_direct; |
167 | |
168 | int (*qbman_swp_release_ptr)(struct qbman_swp *s, |
169 | const struct qbman_release_desc *d, |
170 | const u64 *buffers, |
171 | unsigned int num_buffers) |
172 | = qbman_swp_release_direct; |
173 | |
174 | /* Portal Access */ |
175 | |
176 | static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset) |
177 | { |
178 | return readl_relaxed(p->addr_cinh + offset); |
179 | } |
180 | |
181 | static inline void qbman_write_register(struct qbman_swp *p, u32 offset, |
182 | u32 value) |
183 | { |
184 | writel_relaxed(value, p->addr_cinh + offset); |
185 | } |
186 | |
187 | static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset) |
188 | { |
189 | return p->addr_cena + offset; |
190 | } |
191 | |
192 | #define QBMAN_CINH_SWP_CFG 0xd00 |
193 | |
194 | #define SWP_CFG_DQRR_MF_SHIFT 20 |
195 | #define SWP_CFG_EST_SHIFT 16 |
196 | #define SWP_CFG_CPBS_SHIFT 15 |
197 | #define SWP_CFG_WN_SHIFT 14 |
198 | #define SWP_CFG_RPM_SHIFT 12 |
199 | #define SWP_CFG_DCM_SHIFT 10 |
200 | #define SWP_CFG_EPM_SHIFT 8 |
201 | #define SWP_CFG_VPM_SHIFT 7 |
202 | #define SWP_CFG_CPM_SHIFT 6 |
203 | #define SWP_CFG_SD_SHIFT 5 |
204 | #define SWP_CFG_SP_SHIFT 4 |
205 | #define SWP_CFG_SE_SHIFT 3 |
206 | #define SWP_CFG_DP_SHIFT 2 |
207 | #define SWP_CFG_DE_SHIFT 1 |
208 | #define SWP_CFG_EP_SHIFT 0 |
209 | |
210 | static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm, |
211 | u8 epm, int sd, int sp, int se, |
212 | int dp, int de, int ep) |
213 | { |
214 | return (max_fill << SWP_CFG_DQRR_MF_SHIFT | |
215 | est << SWP_CFG_EST_SHIFT | |
216 | wn << SWP_CFG_WN_SHIFT | |
217 | rpm << SWP_CFG_RPM_SHIFT | |
218 | dcm << SWP_CFG_DCM_SHIFT | |
219 | epm << SWP_CFG_EPM_SHIFT | |
220 | sd << SWP_CFG_SD_SHIFT | |
221 | sp << SWP_CFG_SP_SHIFT | |
222 | se << SWP_CFG_SE_SHIFT | |
223 | dp << SWP_CFG_DP_SHIFT | |
224 | de << SWP_CFG_DE_SHIFT | |
225 | ep << SWP_CFG_EP_SHIFT); |
226 | } |
227 | |
228 | #define QMAN_RT_MODE 0x00000100 |
229 | |
230 | static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last) |
231 | { |
232 | /* 'first' is included, 'last' is excluded */ |
233 | if (first <= last) |
234 | return last - first; |
235 | else |
236 | return (2 * ringsize) - (first - last); |
237 | } |
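
/*
 * Worked example: the producer/consumer indices count modulo 2 * ringsize
 * (the pi_ci_mask built in qbman_swp_init() is 2 * ringsize - 1, so one
 * extra bit carries the wrap polarity). With ringsize = 8,
 * qm_cyc_diff(8, 14, 2) = (2 * 8) - (14 - 2) = 4 outstanding entries.
 */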
238 | |
239 | /** |
240 | * qbman_swp_init() - Create a functional object representing the given |
241 | * QBMan portal descriptor. |
242 | * @d: the given qbman swp descriptor |
243 | * |
244 | * Return qbman_swp portal for success, NULL if the object cannot |
245 | * be created. |
246 | */ |
247 | struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) |
248 | { |
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
250 | u32 reg; |
251 | u32 mask_size; |
252 | u32 eqcr_pi; |
253 | |
254 | if (!p) |
255 | return NULL; |
256 | |
257 | spin_lock_init(&p->access_spinlock); |
258 | |
259 | p->desc = d; |
260 | p->mc.valid_bit = QB_VALID_BIT; |
261 | p->sdq = 0; |
262 | p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT; |
263 | p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT; |
264 | p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT; |
265 | if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) |
266 | p->mr.valid_bit = QB_VALID_BIT; |
267 | |
	atomic_set(&p->vdq.available, 1);
269 | p->vdq.valid_bit = QB_VALID_BIT; |
270 | p->dqrr.next_idx = 0; |
271 | p->dqrr.valid_bit = QB_VALID_BIT; |
272 | |
273 | if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) { |
274 | p->dqrr.dqrr_size = 4; |
275 | p->dqrr.reset_bug = 1; |
276 | } else { |
277 | p->dqrr.dqrr_size = 8; |
278 | p->dqrr.reset_bug = 0; |
279 | } |
280 | |
281 | p->addr_cena = d->cena_bar; |
282 | p->addr_cinh = d->cinh_bar; |
283 | |
284 | if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { |
285 | |
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
					1, /* Writes Non-cacheable */
					0, /* EQCR_CI stashing threshold */
					3, /* RPM: RCR in array mode */
					2, /* DCM: Discrete consumption ack */
					2, /* EPM: EQCR in ring mode */
					1, /* mem stashing drop enable */
					1, /* mem stashing priority enable */
					1, /* mem stashing enable */
					1, /* dequeue stashing priority enable */
					0, /* dequeue stashing enable */
					0); /* EQCR_CI stashing priority enable */
298 | } else { |
299 | memset(p->addr_cena, 0, 64 * 1024); |
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
					1, /* Writes Non-cacheable */
					1, /* EQCR_CI stashing threshold */
					3, /* RPM: RCR in array mode */
					2, /* DCM: Discrete consumption ack */
					0, /* EPM: EQCR in ring mode */
					1, /* mem stashing drop enable */
					1, /* mem stashing priority enable */
					1, /* mem stashing enable */
					1, /* dequeue stashing priority enable */
					0, /* dequeue stashing enable */
					0); /* EQCR_CI stashing priority enable */
312 | reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */ |
313 | 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */ |
314 | 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */ |
315 | } |
316 | |
	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
318 | reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); |
319 | if (!reg) { |
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
322 | return NULL; |
323 | } |
324 | |
325 | if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) { |
326 | qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE); |
327 | qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE); |
328 | } |
329 | /* |
330 | * SDQCR needs to be initialized to 0 when no channels are |
331 | * being dequeued from or else the QMan HW will indicate an |
332 | * error. The values that were calculated above will be |
333 | * applied when dequeues from a specific channel are enabled. |
334 | */ |
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
336 | |
337 | p->eqcr.pi_ring_size = 8; |
338 | if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) { |
339 | p->eqcr.pi_ring_size = 32; |
340 | qbman_swp_enqueue_ptr = |
341 | qbman_swp_enqueue_mem_back; |
342 | qbman_swp_enqueue_multiple_ptr = |
343 | qbman_swp_enqueue_multiple_mem_back; |
344 | qbman_swp_enqueue_multiple_desc_ptr = |
345 | qbman_swp_enqueue_multiple_desc_mem_back; |
346 | qbman_swp_pull_ptr = qbman_swp_pull_mem_back; |
347 | qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back; |
348 | qbman_swp_release_ptr = qbman_swp_release_mem_back; |
349 | } |
350 | |
351 | for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1) |
352 | p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1; |
353 | eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI); |
354 | p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask; |
355 | p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT; |
356 | p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI) |
357 | & p->eqcr.pi_ci_mask; |
358 | p->eqcr.available = p->eqcr.pi_ring_size; |
359 | |
	/* Initialize the software portal with an IRQ timeout period of 0us */
	qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
362 | |
363 | return p; |
364 | } |
365 | |
366 | /** |
 * qbman_swp_finish() - Destroy the functional object representing the given
 * QBMan portal descriptor.
369 | * @p: the qbman_swp object to be destroyed |
370 | */ |
371 | void qbman_swp_finish(struct qbman_swp *p) |
372 | { |
	kfree(p);
374 | } |
375 | |
376 | /** |
377 | * qbman_swp_interrupt_read_status() |
378 | * @p: the given software portal |
379 | * |
380 | * Return the value in the SWP_ISR register. |
381 | */ |
382 | u32 qbman_swp_interrupt_read_status(struct qbman_swp *p) |
383 | { |
384 | return qbman_read_register(p, QBMAN_CINH_SWP_ISR); |
385 | } |
386 | |
387 | /** |
388 | * qbman_swp_interrupt_clear_status() |
389 | * @p: the given software portal |
390 | * @mask: The mask to clear in SWP_ISR register |
391 | */ |
392 | void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask) |
393 | { |
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
395 | } |
396 | |
397 | /** |
398 | * qbman_swp_interrupt_get_trigger() - read interrupt enable register |
399 | * @p: the given software portal |
400 | * |
401 | * Return the value in the SWP_IER register. |
402 | */ |
403 | u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p) |
404 | { |
405 | return qbman_read_register(p, QBMAN_CINH_SWP_IER); |
406 | } |
407 | |
408 | /** |
409 | * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp |
410 | * @p: the given software portal |
411 | * @mask: The mask of bits to enable in SWP_IER |
412 | */ |
413 | void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask) |
414 | { |
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
416 | } |
417 | |
418 | /** |
419 | * qbman_swp_interrupt_get_inhibit() - read interrupt mask register |
420 | * @p: the given software portal object |
421 | * |
422 | * Return the value in the SWP_IIR register. |
423 | */ |
424 | int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p) |
425 | { |
426 | return qbman_read_register(p, QBMAN_CINH_SWP_IIR); |
427 | } |
428 | |
429 | /** |
430 | * qbman_swp_interrupt_set_inhibit() - write interrupt mask register |
431 | * @p: the given software portal object |
432 | * @inhibit: whether to inhibit the IRQs |
433 | */ |
434 | void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) |
435 | { |
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
437 | } |
438 | |
439 | /* |
440 | * Different management commands all use this common base layer of code to issue |
441 | * commands and poll for results. |
442 | */ |
443 | |
444 | /* |
445 | * Returns a pointer to where the caller should fill in their management command |
446 | * (caller should ignore the verb byte) |
447 | */ |
448 | void *qbman_swp_mc_start(struct qbman_swp *p) |
449 | { |
450 | if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) |
451 | return qbman_get_cmd(p, QBMAN_CENA_SWP_CR); |
452 | else |
453 | return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM); |
454 | } |
455 | |
456 | /* |
 * Merges in the caller-supplied command verb (which should not include
458 | * the valid-bit) and submits the command to hardware |
459 | */ |
460 | void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb) |
461 | { |
462 | u8 *v = cmd; |
463 | |
464 | if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { |
465 | dma_wmb(); |
466 | *v = cmd_verb | p->mc.valid_bit; |
467 | } else { |
468 | *v = cmd_verb | p->mc.valid_bit; |
469 | dma_wmb(); |
470 | qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE); |
471 | } |
472 | } |
473 | |
474 | /* |
 * Checks for a completed response (returns non-NULL if and only if the
 * response is complete).
477 | */ |
478 | void *qbman_swp_mc_result(struct qbman_swp *p) |
479 | { |
480 | u32 *ret, verb; |
481 | |
482 | if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { |
483 | ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); |
484 | /* Remove the valid-bit - command completed if the rest |
485 | * is non-zero. |
486 | */ |
487 | verb = ret[0] & ~QB_VALID_BIT; |
488 | if (!verb) |
489 | return NULL; |
490 | p->mc.valid_bit ^= QB_VALID_BIT; |
491 | } else { |
492 | ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM); |
493 | /* Command completed if the valid bit is toggled */ |
494 | if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT)) |
495 | return NULL; |
496 | /* Command completed if the rest is non-zero */ |
497 | verb = ret[0] & ~QB_VALID_BIT; |
498 | if (!verb) |
499 | return NULL; |
500 | p->mr.valid_bit ^= QB_VALID_BIT; |
501 | } |
502 | |
503 | return ret; |
504 | } |
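
/*
 * Illustrative sketch only (not part of the driver): how a caller typically
 * composes the three primitives above into one blocking management command.
 * The real wrapper, qbman_swp_mc_complete(), lives in qbman-portal.h; this
 * version assumes an unbounded poll for brevity.
 */
static inline void *example_swp_mc_complete(struct qbman_swp *p,
					    void *cmd, u8 cmd_verb)
{
	void *resp;

	/* 'cmd' was obtained from qbman_swp_mc_start() and filled in */
	qbman_swp_mc_submit(p, cmd, cmd_verb);
	do {
		resp = qbman_swp_mc_result(p);
	} while (!resp);	/* a real caller would bound this loop */

	return resp;
}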
505 | |
506 | #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0 |
507 | enum qb_enqueue_commands { |
508 | enqueue_empty = 0, |
509 | enqueue_response_always = 1, |
510 | enqueue_rejects_to_fq = 2 |
511 | }; |
512 | |
513 | #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2 |
514 | #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3 |
515 | #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4 |
516 | #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7 |
517 | |
518 | /* |
519 | * qbman_eq_desc_clear() - Clear the contents of a descriptor to |
520 | * default/starting state. |
521 | */ |
522 | void qbman_eq_desc_clear(struct qbman_eq_desc *d) |
523 | { |
524 | memset(d, 0, sizeof(*d)); |
525 | } |
526 | |
527 | /** |
528 | * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp |
529 | * @d: the enqueue descriptor. |
530 | * @respond_success: 1 = enqueue with response always; 0 = enqueue with |
531 | * rejections returned on a FQ. |
532 | */ |
533 | void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success) |
534 | { |
535 | d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT); |
536 | if (respond_success) |
537 | d->verb |= enqueue_response_always; |
538 | else |
539 | d->verb |= enqueue_rejects_to_fq; |
540 | } |
541 | |
542 | /* |
543 | * Exactly one of the following descriptor "targets" should be set. (Calling any |
544 | * one of these will replace the effect of any prior call to one of these.) |
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
547 | */ |
548 | |
549 | /** |
550 | * qbman_eq_desc_set_fq() - set the FQ for the enqueue command |
551 | * @d: the enqueue descriptor |
552 | * @fqid: the id of the frame queue to be enqueued |
553 | */ |
554 | void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid) |
555 | { |
556 | d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT); |
557 | d->tgtid = cpu_to_le32(fqid); |
558 | } |
559 | |
560 | /** |
561 | * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command |
562 | * @d: the enqueue descriptor |
563 | * @qdid: the id of the queuing destination to be enqueued |
564 | * @qd_bin: the queuing destination bin |
565 | * @qd_prio: the queuing destination priority |
566 | */ |
567 | void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid, |
568 | u32 qd_bin, u32 qd_prio) |
569 | { |
570 | d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT; |
571 | d->tgtid = cpu_to_le32(qdid); |
572 | d->qdbin = cpu_to_le16(qd_bin); |
573 | d->qpri = qd_prio; |
574 | } |
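
/*
 * Illustrative sketch only (not part of the driver): building a minimal
 * enqueue descriptor that targets a frame queue and issuing it through the
 * version-dispatched enqueue entry point. 'fqid' and 'fd' are assumed to
 * come from the caller's context.
 */
static inline int example_enqueue_to_fq(struct qbman_swp *s, u32 fqid,
					const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc d;

	qbman_eq_desc_clear(&d);
	qbman_eq_desc_set_no_orp(&d, 0);	/* rejections go back to the FQ */
	qbman_eq_desc_set_fq(&d, fqid);

	return qbman_swp_enqueue_ptr(s, &d, fd);	/* 0 or -EBUSY */
}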
575 | |
576 | #define EQAR_IDX(eqar) ((eqar) & 0x7) |
577 | #define EQAR_VB(eqar) ((eqar) & 0x80) |
578 | #define EQAR_SUCCESS(eqar) ((eqar) & 0x100) |
579 | |
580 | #define QB_RT_BIT ((u32)0x100) |
581 | /** |
582 | * qbman_swp_enqueue_direct() - Issue an enqueue command |
583 | * @s: the software portal used for enqueue |
584 | * @d: the enqueue descriptor |
585 | * @fd: the frame descriptor to be enqueued |
586 | * |
587 | * Please note that 'fd' should only be NULL if the "action" of the |
588 | * descriptor is "orp_hole" or "orp_nesn". |
589 | * |
590 | * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. |
591 | */ |
592 | static |
593 | int qbman_swp_enqueue_direct(struct qbman_swp *s, |
594 | const struct qbman_eq_desc *d, |
595 | const struct dpaa2_fd *fd) |
596 | { |
597 | int flags = 0; |
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
599 | |
600 | if (ret >= 0) |
601 | ret = 0; |
602 | else |
603 | ret = -EBUSY; |
604 | return ret; |
605 | } |
606 | |
607 | /** |
608 | * qbman_swp_enqueue_mem_back() - Issue an enqueue command |
609 | * @s: the software portal used for enqueue |
610 | * @d: the enqueue descriptor |
611 | * @fd: the frame descriptor to be enqueued |
612 | * |
613 | * Please note that 'fd' should only be NULL if the "action" of the |
614 | * descriptor is "orp_hole" or "orp_nesn". |
615 | * |
616 | * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. |
617 | */ |
618 | static |
619 | int qbman_swp_enqueue_mem_back(struct qbman_swp *s, |
620 | const struct qbman_eq_desc *d, |
621 | const struct dpaa2_fd *fd) |
622 | { |
623 | int flags = 0; |
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
625 | |
626 | if (ret >= 0) |
627 | ret = 0; |
628 | else |
629 | ret = -EBUSY; |
630 | return ret; |
631 | } |
632 | |
633 | /** |
634 | * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command |
635 | * using one enqueue descriptor |
636 | * @s: the software portal used for enqueue |
637 | * @d: the enqueue descriptor |
638 | * @fd: table pointer of frame descriptor table to be enqueued |
639 | * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL |
640 | * @num_frames: number of fd to be enqueued |
641 | * |
642 | * Return the number of fd enqueued, or a negative error number. |
643 | */ |
644 | static |
645 | int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, |
646 | const struct qbman_eq_desc *d, |
647 | const struct dpaa2_fd *fd, |
648 | uint32_t *flags, |
649 | int num_frames) |
650 | { |
651 | uint32_t *p = NULL; |
652 | const uint32_t *cl = (uint32_t *)d; |
653 | uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; |
654 | int i, num_enqueued = 0; |
655 | |
	spin_lock(&s->access_spinlock);
657 | half_mask = (s->eqcr.pi_ci_mask>>1); |
658 | full_mask = s->eqcr.pi_ci_mask; |
659 | |
660 | if (!s->eqcr.available) { |
661 | eqcr_ci = s->eqcr.ci; |
662 | p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI; |
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
664 | s->eqcr.ci &= full_mask; |
665 | |
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
668 | if (!s->eqcr.available) { |
			spin_unlock(&s->access_spinlock);
670 | return 0; |
671 | } |
672 | } |
673 | |
674 | eqcr_pi = s->eqcr.pi; |
675 | num_enqueued = (s->eqcr.available < num_frames) ? |
676 | s->eqcr.available : num_frames; |
677 | s->eqcr.available -= num_enqueued; |
678 | /* Fill in the EQCR ring */ |
679 | for (i = 0; i < num_enqueued; i++) { |
680 | p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); |
681 | /* Skip copying the verb */ |
682 | memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); |
683 | memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], |
684 | &fd[i], sizeof(*fd)); |
685 | eqcr_pi++; |
686 | } |
687 | |
688 | dma_wmb(); |
689 | |
690 | /* Set the verb byte, have to substitute in the valid-bit */ |
691 | eqcr_pi = s->eqcr.pi; |
692 | for (i = 0; i < num_enqueued; i++) { |
693 | p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); |
694 | p[0] = cl[0] | s->eqcr.pi_vb; |
695 | if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { |
696 | struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p; |
697 | |
698 | eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | |
699 | ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); |
700 | } |
701 | eqcr_pi++; |
702 | if (!(eqcr_pi & half_mask)) |
703 | s->eqcr.pi_vb ^= QB_VALID_BIT; |
704 | } |
705 | |
	/* Flush all the cachelines without a load/store in between */
707 | eqcr_pi = s->eqcr.pi; |
708 | for (i = 0; i < num_enqueued; i++) |
709 | eqcr_pi++; |
710 | s->eqcr.pi = eqcr_pi & full_mask; |
	spin_unlock(&s->access_spinlock);
712 | |
713 | return num_enqueued; |
714 | } |
715 | |
716 | /** |
717 | * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command |
718 | * using one enqueue descriptor |
719 | * @s: the software portal used for enqueue |
720 | * @d: the enqueue descriptor |
721 | * @fd: table pointer of frame descriptor table to be enqueued |
722 | * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL |
723 | * @num_frames: number of fd to be enqueued |
724 | * |
725 | * Return the number of fd enqueued, or a negative error number. |
726 | */ |
727 | static |
728 | int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s, |
729 | const struct qbman_eq_desc *d, |
730 | const struct dpaa2_fd *fd, |
731 | uint32_t *flags, |
732 | int num_frames) |
733 | { |
734 | uint32_t *p = NULL; |
735 | const uint32_t *cl = (uint32_t *)(d); |
736 | uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; |
737 | int i, num_enqueued = 0; |
738 | unsigned long irq_flags; |
739 | |
740 | spin_lock_irqsave(&s->access_spinlock, irq_flags); |
741 | |
742 | half_mask = (s->eqcr.pi_ci_mask>>1); |
743 | full_mask = s->eqcr.pi_ci_mask; |
744 | if (!s->eqcr.available) { |
745 | eqcr_ci = s->eqcr.ci; |
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
747 | s->eqcr.ci &= full_mask; |
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
750 | if (!s->eqcr.available) { |
			spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
752 | return 0; |
753 | } |
754 | } |
755 | |
756 | eqcr_pi = s->eqcr.pi; |
757 | num_enqueued = (s->eqcr.available < num_frames) ? |
758 | s->eqcr.available : num_frames; |
759 | s->eqcr.available -= num_enqueued; |
760 | /* Fill in the EQCR ring */ |
761 | for (i = 0; i < num_enqueued; i++) { |
762 | p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); |
763 | /* Skip copying the verb */ |
764 | memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); |
765 | memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], |
766 | &fd[i], sizeof(*fd)); |
767 | eqcr_pi++; |
768 | } |
769 | |
770 | /* Set the verb byte, have to substitute in the valid-bit */ |
771 | eqcr_pi = s->eqcr.pi; |
772 | for (i = 0; i < num_enqueued; i++) { |
773 | p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); |
774 | p[0] = cl[0] | s->eqcr.pi_vb; |
775 | if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { |
776 | struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p; |
777 | |
778 | eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | |
779 | ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); |
780 | } |
781 | eqcr_pi++; |
782 | if (!(eqcr_pi & half_mask)) |
783 | s->eqcr.pi_vb ^= QB_VALID_BIT; |
784 | } |
785 | s->eqcr.pi = eqcr_pi & full_mask; |
786 | |
787 | dma_wmb(); |
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
791 | |
792 | return num_enqueued; |
793 | } |
794 | |
795 | /** |
796 | * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command |
797 | * using multiple enqueue descriptor |
798 | * @s: the software portal used for enqueue |
799 | * @d: table of minimal enqueue descriptor |
800 | * @fd: table pointer of frame descriptor table to be enqueued |
801 | * @num_frames: number of fd to be enqueued |
802 | * |
803 | * Return the number of fd enqueued, or a negative error number. |
804 | */ |
805 | static |
806 | int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, |
807 | const struct qbman_eq_desc *d, |
808 | const struct dpaa2_fd *fd, |
809 | int num_frames) |
810 | { |
811 | uint32_t *p; |
812 | const uint32_t *cl; |
813 | uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; |
814 | int i, num_enqueued = 0; |
815 | |
816 | half_mask = (s->eqcr.pi_ci_mask>>1); |
817 | full_mask = s->eqcr.pi_ci_mask; |
818 | if (!s->eqcr.available) { |
819 | eqcr_ci = s->eqcr.ci; |
820 | p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI; |
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
824 | if (!s->eqcr.available) |
825 | return 0; |
826 | } |
827 | |
828 | eqcr_pi = s->eqcr.pi; |
829 | num_enqueued = (s->eqcr.available < num_frames) ? |
830 | s->eqcr.available : num_frames; |
831 | s->eqcr.available -= num_enqueued; |
832 | /* Fill in the EQCR ring */ |
833 | for (i = 0; i < num_enqueued; i++) { |
834 | p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); |
835 | cl = (uint32_t *)(&d[i]); |
836 | /* Skip copying the verb */ |
837 | memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); |
838 | memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], |
839 | &fd[i], sizeof(*fd)); |
840 | eqcr_pi++; |
841 | } |
842 | |
843 | dma_wmb(); |
844 | |
845 | /* Set the verb byte, have to substitute in the valid-bit */ |
846 | eqcr_pi = s->eqcr.pi; |
847 | for (i = 0; i < num_enqueued; i++) { |
848 | p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); |
849 | cl = (uint32_t *)(&d[i]); |
850 | p[0] = cl[0] | s->eqcr.pi_vb; |
851 | eqcr_pi++; |
852 | if (!(eqcr_pi & half_mask)) |
853 | s->eqcr.pi_vb ^= QB_VALID_BIT; |
854 | } |
855 | |
	/* Flush all the cachelines without a load/store in between */
857 | eqcr_pi = s->eqcr.pi; |
858 | for (i = 0; i < num_enqueued; i++) |
859 | eqcr_pi++; |
860 | s->eqcr.pi = eqcr_pi & full_mask; |
861 | |
862 | return num_enqueued; |
863 | } |
864 | |
865 | /** |
866 | * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command |
867 | * using multiple enqueue descriptor |
868 | * @s: the software portal used for enqueue |
869 | * @d: table of minimal enqueue descriptor |
870 | * @fd: table pointer of frame descriptor table to be enqueued |
871 | * @num_frames: number of fd to be enqueued |
872 | * |
873 | * Return the number of fd enqueued, or a negative error number. |
874 | */ |
875 | static |
876 | int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s, |
877 | const struct qbman_eq_desc *d, |
878 | const struct dpaa2_fd *fd, |
879 | int num_frames) |
880 | { |
881 | uint32_t *p; |
882 | const uint32_t *cl; |
883 | uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; |
884 | int i, num_enqueued = 0; |
885 | |
886 | half_mask = (s->eqcr.pi_ci_mask>>1); |
887 | full_mask = s->eqcr.pi_ci_mask; |
888 | if (!s->eqcr.available) { |
889 | eqcr_ci = s->eqcr.ci; |
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
894 | if (!s->eqcr.available) |
895 | return 0; |
896 | } |
897 | |
898 | eqcr_pi = s->eqcr.pi; |
899 | num_enqueued = (s->eqcr.available < num_frames) ? |
900 | s->eqcr.available : num_frames; |
901 | s->eqcr.available -= num_enqueued; |
902 | /* Fill in the EQCR ring */ |
903 | for (i = 0; i < num_enqueued; i++) { |
904 | p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); |
905 | cl = (uint32_t *)(&d[i]); |
906 | /* Skip copying the verb */ |
907 | memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); |
908 | memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], |
909 | &fd[i], sizeof(*fd)); |
910 | eqcr_pi++; |
911 | } |
912 | |
913 | /* Set the verb byte, have to substitute in the valid-bit */ |
914 | eqcr_pi = s->eqcr.pi; |
915 | for (i = 0; i < num_enqueued; i++) { |
916 | p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); |
917 | cl = (uint32_t *)(&d[i]); |
918 | p[0] = cl[0] | s->eqcr.pi_vb; |
919 | eqcr_pi++; |
920 | if (!(eqcr_pi & half_mask)) |
921 | s->eqcr.pi_vb ^= QB_VALID_BIT; |
922 | } |
923 | |
924 | s->eqcr.pi = eqcr_pi & full_mask; |
925 | |
926 | dma_wmb(); |
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
929 | |
930 | return num_enqueued; |
931 | } |
932 | |
933 | /* Static (push) dequeue */ |
934 | |
935 | /** |
936 | * qbman_swp_push_get() - Get the push dequeue setup |
937 | * @s: the software portal object |
938 | * @channel_idx: the channel index to query |
939 | * @enabled: returned boolean to show whether the push dequeue is enabled |
940 | * for the given channel |
941 | */ |
942 | void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled) |
943 | { |
944 | u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK; |
945 | |
946 | WARN_ON(channel_idx > 15); |
	*enabled = !!(src & (1 << channel_idx));
948 | } |
949 | |
950 | /** |
951 | * qbman_swp_push_set() - Enable or disable push dequeue |
952 | * @s: the software portal object |
953 | * @channel_idx: the channel index (0 to 15) |
954 | * @enable: enable or disable push dequeue |
955 | */ |
956 | void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable) |
957 | { |
958 | u16 dqsrc; |
959 | |
960 | WARN_ON(channel_idx > 15); |
961 | if (enable) |
962 | s->sdq |= 1 << channel_idx; |
963 | else |
964 | s->sdq &= ~(1 << channel_idx); |
965 | |
	/* Recompute the complete src map. If no channels are enabled
967 | * the SDQCR must be 0 or else QMan will assert errors |
968 | */ |
969 | dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK; |
970 | if (dqsrc != 0) |
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
972 | else |
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
974 | } |
975 | |
976 | #define QB_VDQCR_VERB_DCT_SHIFT 0 |
977 | #define QB_VDQCR_VERB_DT_SHIFT 2 |
978 | #define QB_VDQCR_VERB_RLS_SHIFT 4 |
979 | #define QB_VDQCR_VERB_WAE_SHIFT 5 |
980 | |
981 | enum qb_pull_dt_e { |
982 | qb_pull_dt_channel, |
983 | qb_pull_dt_workqueue, |
984 | qb_pull_dt_framequeue |
985 | }; |
986 | |
987 | /** |
988 | * qbman_pull_desc_clear() - Clear the contents of a descriptor to |
989 | * default/starting state |
990 | * @d: the pull dequeue descriptor to be cleared |
991 | */ |
992 | void qbman_pull_desc_clear(struct qbman_pull_desc *d) |
993 | { |
994 | memset(d, 0, sizeof(*d)); |
995 | } |
996 | |
997 | /** |
998 | * qbman_pull_desc_set_storage()- Set the pull dequeue storage |
999 | * @d: the pull dequeue descriptor to be set |
1000 | * @storage: the pointer of the memory to store the dequeue result |
1001 | * @storage_phys: the physical address of the storage memory |
1002 | * @stash: to indicate whether write allocate is enabled |
1003 | * |
 * If not called, or if called with 'storage' as NULL, the resulting pull dequeues
1005 | * will produce results to DQRR. If 'storage' is non-NULL, then results are |
1006 | * produced to the given memory location (using the DMA address which |
1007 | * the caller provides in 'storage_phys'), and 'stash' controls whether or not |
1008 | * those writes to main-memory express a cache-warming attribute. |
1009 | */ |
1010 | void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, |
1011 | struct dpaa2_dq *storage, |
1012 | dma_addr_t storage_phys, |
1013 | int stash) |
1014 | { |
1015 | /* save the virtual address */ |
1016 | d->rsp_addr_virt = (u64)(uintptr_t)storage; |
1017 | |
1018 | if (!storage) { |
1019 | d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT); |
1020 | return; |
1021 | } |
1022 | d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT; |
1023 | if (stash) |
1024 | d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT; |
1025 | else |
1026 | d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT); |
1027 | |
1028 | d->rsp_addr = cpu_to_le64(storage_phys); |
1029 | } |
1030 | |
1031 | /** |
1032 | * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued |
1033 | * @d: the pull dequeue descriptor to be set |
1034 | * @numframes: number of frames to be set, must be between 1 and 16, inclusive |
1035 | */ |
1036 | void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes) |
1037 | { |
1038 | d->numf = numframes - 1; |
1039 | } |
1040 | |
1041 | /* |
1042 | * Exactly one of the following descriptor "actions" should be set. (Calling any |
1043 | * one of these will replace the effect of any prior call to one of these.) |
1044 | * - pull dequeue from the given frame queue (FQ) |
1045 | * - pull dequeue from any FQ in the given work queue (WQ) |
1046 | * - pull dequeue from any FQ in any WQ in the given channel |
1047 | */ |
1048 | |
1049 | /** |
1050 | * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues |
1051 | * @d: the pull dequeue descriptor to be set |
1052 | * @fqid: the frame queue index of the given FQ |
1053 | */ |
1054 | void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid) |
1055 | { |
1056 | d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT; |
1057 | d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT; |
1058 | d->dq_src = cpu_to_le32(fqid); |
1059 | } |
1060 | |
1061 | /** |
1062 | * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues |
1063 | * @d: the pull dequeue descriptor to be set |
1064 | * @wqid: composed of channel id and wqid within the channel |
1065 | * @dct: the dequeue command type |
1066 | */ |
1067 | void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid, |
1068 | enum qbman_pull_type_e dct) |
1069 | { |
1070 | d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT; |
1071 | d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT; |
1072 | d->dq_src = cpu_to_le32(wqid); |
1073 | } |
1074 | |
1075 | /** |
1076 | * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command |
1077 | * dequeues |
1078 | * @d: the pull dequeue descriptor to be set |
1079 | * @chid: the channel id to be dequeued |
1080 | * @dct: the dequeue command type |
1081 | */ |
1082 | void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid, |
1083 | enum qbman_pull_type_e dct) |
1084 | { |
1085 | d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT; |
1086 | d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT; |
1087 | d->dq_src = cpu_to_le32(chid); |
1088 | } |
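
/*
 * Illustrative sketch only (not part of the driver): a volatile dequeue of up
 * to 4 frames from a frame queue into caller-provided storage. 'storage' and
 * 'storage_phys' are assumed to be a DMA-mapped array of struct dpaa2_dq.
 */
static inline int example_pull_from_fq(struct qbman_swp *s, u32 fqid,
				       struct dpaa2_dq *storage,
				       dma_addr_t storage_phys)
{
	struct qbman_pull_desc d;

	qbman_pull_desc_clear(&d);
	qbman_pull_desc_set_storage(&d, storage, storage_phys, 1);
	qbman_pull_desc_set_numframes(&d, 4);
	qbman_pull_desc_set_fq(&d, fqid);

	return qbman_swp_pull_ptr(s, &d);	/* 0 or -EBUSY */
}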
1089 | |
1090 | /** |
1091 | * qbman_swp_pull_direct() - Issue the pull dequeue command |
1092 | * @s: the software portal object |
1093 | * @d: the software portal descriptor which has been configured with |
1094 | * the set of qbman_pull_desc_set_*() calls |
1095 | * |
1096 | * Return 0 for success, and -EBUSY if the software portal is not ready |
1097 | * to do pull dequeue. |
1098 | */ |
1099 | static |
1100 | int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d) |
1101 | { |
1102 | struct qbman_pull_desc *p; |
1103 | |
	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
1106 | return -EBUSY; |
1107 | } |
1108 | s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt; |
1109 | if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) |
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
1113 | p->numf = d->numf; |
1114 | p->tok = QMAN_DQ_TOKEN_VALID; |
1115 | p->dq_src = d->dq_src; |
1116 | p->rsp_addr = d->rsp_addr; |
1117 | p->rsp_addr_virt = d->rsp_addr_virt; |
1118 | dma_wmb(); |
1119 | /* Set the verb byte, have to substitute in the valid-bit */ |
1120 | p->verb = d->verb | s->vdq.valid_bit; |
1121 | s->vdq.valid_bit ^= QB_VALID_BIT; |
1122 | |
1123 | return 0; |
1124 | } |
1125 | |
1126 | /** |
1127 | * qbman_swp_pull_mem_back() - Issue the pull dequeue command |
1128 | * @s: the software portal object |
1129 | * @d: the software portal descriptor which has been configured with |
1130 | * the set of qbman_pull_desc_set_*() calls |
1131 | * |
1132 | * Return 0 for success, and -EBUSY if the software portal is not ready |
1133 | * to do pull dequeue. |
1134 | */ |
1135 | static |
1136 | int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d) |
1137 | { |
1138 | struct qbman_pull_desc *p; |
1139 | |
	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
1142 | return -EBUSY; |
1143 | } |
1144 | s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt; |
1145 | if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) |
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
1149 | p->numf = d->numf; |
1150 | p->tok = QMAN_DQ_TOKEN_VALID; |
1151 | p->dq_src = d->dq_src; |
1152 | p->rsp_addr = d->rsp_addr; |
1153 | p->rsp_addr_virt = d->rsp_addr_virt; |
1154 | |
1155 | /* Set the verb byte, have to substitute in the valid-bit */ |
1156 | p->verb = d->verb | s->vdq.valid_bit; |
1157 | s->vdq.valid_bit ^= QB_VALID_BIT; |
1158 | dma_wmb(); |
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1160 | |
1161 | return 0; |
1162 | } |
1163 | |
1164 | #define QMAN_DQRR_PI_MASK 0xf |
1165 | |
1166 | /** |
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
1168 | * @s: the software portal object |
1169 | * |
1170 | * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry |
1171 | * only once, so repeated calls can return a sequence of DQRR entries, without |
1172 | * requiring they be consumed immediately or in any particular order. |
1173 | */ |
1174 | const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s) |
1175 | { |
1176 | u32 verb; |
1177 | u32 response_verb; |
1178 | u32 flags; |
1179 | struct dpaa2_dq *p; |
1180 | |
1181 | /* Before using valid-bit to detect if something is there, we have to |
1182 | * handle the case of the DQRR reset bug... |
1183 | */ |
1184 | if (unlikely(s->dqrr.reset_bug)) { |
1185 | /* |
1186 | * We pick up new entries by cache-inhibited producer index, |
1187 | * which means that a non-coherent mapping would require us to |
1188 | * invalidate and read *only* once that PI has indicated that |
1189 | * there's an entry here. The first trip around the DQRR ring |
1190 | * will be much less efficient than all subsequent trips around |
1191 | * it... |
1192 | */ |
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
1194 | QMAN_DQRR_PI_MASK; |
1195 | |
1196 | /* there are new entries if pi != next_idx */ |
1197 | if (pi == s->dqrr.next_idx) |
1198 | return NULL; |
1199 | |
1200 | /* |
1201 | * if next_idx is/was the last ring index, and 'pi' is |
1202 | * different, we can disable the workaround as all the ring |
1203 | * entries have now been DMA'd to so valid-bit checking is |
1204 | * repaired. Note: this logic needs to be based on next_idx |
1205 | * (which increments one at a time), rather than on pi (which |
1206 | * can burst and wrap-around between our snapshots of it). |
1207 | */ |
1208 | if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { |
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
1211 | s->dqrr.reset_bug = 0; |
1212 | } |
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1215 | } |
1216 | |
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1218 | verb = p->dq.verb; |
1219 | |
1220 | /* |
1221 | * If the valid-bit isn't of the expected polarity, nothing there. Note, |
 * in the DQRR reset bug workaround, we shouldn't need to skip this
 * check, because we've already determined that a new entry is available
1224 | * and we've invalidated the cacheline before reading it, so the |
1225 | * valid-bit behaviour is repaired and should tell us what we already |
1226 | * knew from reading PI. |
1227 | */ |
1228 | if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { |
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1231 | return NULL; |
1232 | } |
1233 | /* |
1234 | * There's something there. Move "next_idx" attention to the next ring |
1235 | * entry (and prefetch it) before returning what we found. |
1236 | */ |
1237 | s->dqrr.next_idx++; |
1238 | s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ |
1239 | if (!s->dqrr.next_idx) |
1240 | s->dqrr.valid_bit ^= QB_VALID_BIT; |
1241 | |
1242 | /* |
1243 | * If this is the final response to a volatile dequeue command |
1244 | * indicate that the vdq is available |
1245 | */ |
1246 | flags = p->dq.stat; |
1247 | response_verb = verb & QBMAN_RESULT_MASK; |
1248 | if ((response_verb == QBMAN_RESULT_DQ) && |
1249 | (flags & DPAA2_DQ_STAT_VOLATILE) && |
1250 | (flags & DPAA2_DQ_STAT_EXPIRED)) |
		atomic_inc(&s->vdq.available);
1252 | |
	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1254 | |
1255 | return p; |
1256 | } |
1257 | |
1258 | /** |
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
1260 | * @s: the software portal object |
1261 | * |
1262 | * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry |
1263 | * only once, so repeated calls can return a sequence of DQRR entries, without |
1264 | * requiring they be consumed immediately or in any particular order. |
1265 | */ |
1266 | const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s) |
1267 | { |
1268 | u32 verb; |
1269 | u32 response_verb; |
1270 | u32 flags; |
1271 | struct dpaa2_dq *p; |
1272 | |
1273 | /* Before using valid-bit to detect if something is there, we have to |
1274 | * handle the case of the DQRR reset bug... |
1275 | */ |
1276 | if (unlikely(s->dqrr.reset_bug)) { |
1277 | /* |
1278 | * We pick up new entries by cache-inhibited producer index, |
1279 | * which means that a non-coherent mapping would require us to |
1280 | * invalidate and read *only* once that PI has indicated that |
1281 | * there's an entry here. The first trip around the DQRR ring |
1282 | * will be much less efficient than all subsequent trips around |
1283 | * it... |
1284 | */ |
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
1286 | QMAN_DQRR_PI_MASK; |
1287 | |
1288 | /* there are new entries if pi != next_idx */ |
1289 | if (pi == s->dqrr.next_idx) |
1290 | return NULL; |
1291 | |
1292 | /* |
1293 | * if next_idx is/was the last ring index, and 'pi' is |
1294 | * different, we can disable the workaround as all the ring |
1295 | * entries have now been DMA'd to so valid-bit checking is |
1296 | * repaired. Note: this logic needs to be based on next_idx |
1297 | * (which increments one at a time), rather than on pi (which |
1298 | * can burst and wrap-around between our snapshots of it). |
1299 | */ |
1300 | if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { |
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
1303 | s->dqrr.reset_bug = 0; |
1304 | } |
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1307 | } |
1308 | |
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1310 | verb = p->dq.verb; |
1311 | |
1312 | /* |
1313 | * If the valid-bit isn't of the expected polarity, nothing there. Note, |
 * in the DQRR reset bug workaround, we shouldn't need to skip this
 * check, because we've already determined that a new entry is available
1316 | * and we've invalidated the cacheline before reading it, so the |
1317 | * valid-bit behaviour is repaired and should tell us what we already |
1318 | * knew from reading PI. |
1319 | */ |
1320 | if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { |
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1323 | return NULL; |
1324 | } |
1325 | /* |
1326 | * There's something there. Move "next_idx" attention to the next ring |
1327 | * entry (and prefetch it) before returning what we found. |
1328 | */ |
1329 | s->dqrr.next_idx++; |
1330 | s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ |
1331 | if (!s->dqrr.next_idx) |
1332 | s->dqrr.valid_bit ^= QB_VALID_BIT; |
1333 | |
1334 | /* |
1335 | * If this is the final response to a volatile dequeue command |
1336 | * indicate that the vdq is available |
1337 | */ |
1338 | flags = p->dq.stat; |
1339 | response_verb = verb & QBMAN_RESULT_MASK; |
1340 | if ((response_verb == QBMAN_RESULT_DQ) && |
1341 | (flags & DPAA2_DQ_STAT_VOLATILE) && |
1342 | (flags & DPAA2_DQ_STAT_EXPIRED)) |
		atomic_inc(&s->vdq.available);
1344 | |
	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1346 | |
1347 | return p; |
1348 | } |
1349 | |
1350 | /** |
1351 | * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from |
1352 | * qbman_swp_dqrr_next(). |
1353 | * @s: the software portal object |
1354 | * @dq: the DQRR entry to be consumed |
1355 | */ |
1356 | void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq) |
1357 | { |
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1359 | } |
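
/*
 * Illustrative sketch only (not part of the driver): draining the DQRR
 * through the version-dispatched dqrr_next entry point, consuming each
 * entry once it has been processed.
 */
static inline void example_drain_dqrr(struct qbman_swp *s)
{
	const struct dpaa2_dq *dq;

	while ((dq = qbman_swp_dqrr_next_ptr(s)) != NULL) {
		/* process 'dq' here, then release the ring slot */
		qbman_swp_dqrr_consume(s, dq);
	}
}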
1360 | |
1361 | /** |
1362 | * qbman_result_has_new_result() - Check and get the dequeue response from the |
1363 | * dq storage memory set in pull dequeue command |
1364 | * @s: the software portal object |
1365 | * @dq: the dequeue result read from the memory |
1366 | * |
1367 | * Return 1 for getting a valid dequeue result, or 0 for not getting a valid |
1368 | * dequeue result. |
1369 | * |
1370 | * Only used for user-provided storage of dequeue results, not DQRR. For |
1371 | * efficiency purposes, the driver will perform any required endianness |
1372 | * conversion to ensure that the user's dequeue result storage is in host-endian |
1373 | * format. As such, once the user has called qbman_result_has_new_result() and |
1374 | * been returned a valid dequeue result, they should not call it again on |
1375 | * the same memory location (except of course if another dequeue command has |
1376 | * been executed to produce a new result to that location). |
1377 | */ |
1378 | int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq) |
1379 | { |
1380 | if (dq->dq.tok != QMAN_DQ_TOKEN_VALID) |
1381 | return 0; |
1382 | |
1383 | /* |
1384 | * Set token to be 0 so we will detect change back to 1 |
	 * next time the loop is traversed. Const is cast away here
1386 | * as we want users to treat the dequeue responses as read only. |
1387 | */ |
1388 | ((struct dpaa2_dq *)dq)->dq.tok = 0; |
1389 | |
1390 | /* |
1391 | * Determine whether VDQCR is available based on whether the |
1392 | * current result is sitting in the first storage location of |
1393 | * the busy command. |
1394 | */ |
1395 | if (s->vdq.storage == dq) { |
1396 | s->vdq.storage = NULL; |
		atomic_inc(&s->vdq.available);
1398 | } |
1399 | |
1400 | return 1; |
1401 | } |
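
/*
 * Illustrative sketch only (not part of the driver): busy-polling a
 * caller-provided storage entry after a pull dequeue until the token
 * indicates a valid result. A real caller would bound this loop.
 */
static inline const struct dpaa2_dq *
example_wait_for_result(struct qbman_swp *s, const struct dpaa2_dq *storage)
{
	while (!qbman_result_has_new_result(s, storage))
		cpu_relax();

	return storage;
}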
1402 | |
1403 | /** |
1404 | * qbman_release_desc_clear() - Clear the contents of a descriptor to |
1405 | * default/starting state. |
 * @d: the release descriptor to be cleared
1407 | */ |
1408 | void qbman_release_desc_clear(struct qbman_release_desc *d) |
1409 | { |
1410 | memset(d, 0, sizeof(*d)); |
1411 | d->verb = 1 << 5; /* Release Command Valid */ |
1412 | } |
1413 | |
1414 | /** |
1415 | * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to |
1416 | * @d: the pull dequeue descriptor to be set |
1417 | * @bpid: the bpid value to be set |
1418 | */ |
1419 | void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid) |
1420 | { |
1421 | d->bpid = cpu_to_le16(bpid); |
1422 | } |
1423 | |
1424 | /** |
1425 | * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI |
1426 | * interrupt source should be asserted after the release command is completed. |
 * @d: the release descriptor to be set
1428 | * @enable: enable (1) or disable (0) value |
1429 | */ |
1430 | void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) |
1431 | { |
1432 | if (enable) |
1433 | d->verb |= 1 << 6; |
1434 | else |
1435 | d->verb &= ~(1 << 6); |
1436 | } |
1437 | |
1438 | #define RAR_IDX(rar) ((rar) & 0x7) |
1439 | #define RAR_VB(rar) ((rar) & 0x80) |
1440 | #define RAR_SUCCESS(rar) ((rar) & 0x100) |
1441 | |
1442 | /** |
1443 | * qbman_swp_release_direct() - Issue a buffer release command |
1444 | * @s: the software portal object |
1445 | * @d: the release descriptor |
 * @buffers: a pointer to the buffer addresses to be released
1447 | * @num_buffers: number of buffers to be released, must be less than 8 |
1448 | * |
1449 | * Return 0 for success, -EBUSY if the release command ring is not ready. |
1450 | */ |
1451 | int qbman_swp_release_direct(struct qbman_swp *s, |
1452 | const struct qbman_release_desc *d, |
1453 | const u64 *buffers, unsigned int num_buffers) |
1454 | { |
1455 | int i; |
1456 | struct qbman_release_desc *p; |
1457 | u32 rar; |
1458 | |
1459 | if (!num_buffers || (num_buffers > 7)) |
1460 | return -EINVAL; |
1461 | |
	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
1463 | if (!RAR_SUCCESS(rar)) |
1464 | return -EBUSY; |
1465 | |
1466 | /* Start the release command */ |
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1468 | |
1469 | /* Copy the caller's buffer pointers to the command */ |
1470 | for (i = 0; i < num_buffers; i++) |
1471 | p->buf[i] = cpu_to_le64(buffers[i]); |
1472 | p->bpid = d->bpid; |
1473 | |
1474 | /* |
1475 | * Set the verb byte, have to substitute in the valid-bit |
1476 | * and the number of buffers. |
1477 | */ |
1478 | dma_wmb(); |
1479 | p->verb = d->verb | RAR_VB(rar) | num_buffers; |
1480 | |
1481 | return 0; |
1482 | } |
1483 | |
1484 | /** |
1485 | * qbman_swp_release_mem_back() - Issue a buffer release command |
1486 | * @s: the software portal object |
1487 | * @d: the release descriptor |
 * @buffers: a pointer to the buffer addresses to be released
1489 | * @num_buffers: number of buffers to be released, must be less than 8 |
1490 | * |
1491 | * Return 0 for success, -EBUSY if the release command ring is not ready. |
1492 | */ |
1493 | int qbman_swp_release_mem_back(struct qbman_swp *s, |
1494 | const struct qbman_release_desc *d, |
1495 | const u64 *buffers, unsigned int num_buffers) |
1496 | { |
1497 | int i; |
1498 | struct qbman_release_desc *p; |
1499 | u32 rar; |
1500 | |
1501 | if (!num_buffers || (num_buffers > 7)) |
1502 | return -EINVAL; |
1503 | |
	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
1505 | if (!RAR_SUCCESS(rar)) |
1506 | return -EBUSY; |
1507 | |
1508 | /* Start the release command */ |
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1510 | |
1511 | /* Copy the caller's buffer pointers to the command */ |
1512 | for (i = 0; i < num_buffers; i++) |
1513 | p->buf[i] = cpu_to_le64(buffers[i]); |
1514 | p->bpid = d->bpid; |
1515 | |
1516 | p->verb = d->verb | RAR_VB(rar) | num_buffers; |
1517 | dma_wmb(); |
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);
1520 | |
1521 | return 0; |
1522 | } |
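
/*
 * Illustrative sketch only (not part of the driver): returning a small batch
 * of buffers to a buffer pool through the version-dispatched release entry
 * point. 'bpid' and 'buffers' are assumed to come from the caller; 'num'
 * must be between 1 and 7.
 */
static inline int example_release_buffers(struct qbman_swp *s, u16 bpid,
					  const u64 *buffers, unsigned int num)
{
	struct qbman_release_desc d;

	qbman_release_desc_clear(&d);
	qbman_release_desc_set_bpid(&d, bpid);
	qbman_release_desc_set_rcdi(&d, 0);

	return qbman_swp_release_ptr(s, &d, buffers, num);	/* 0 or -EBUSY */
}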
1523 | |
1524 | struct qbman_acquire_desc { |
1525 | u8 verb; |
1526 | u8 reserved; |
1527 | __le16 bpid; |
1528 | u8 num; |
1529 | u8 reserved2[59]; |
1530 | }; |
1531 | |
1532 | struct qbman_acquire_rslt { |
1533 | u8 verb; |
1534 | u8 rslt; |
1535 | __le16 reserved; |
1536 | u8 num; |
1537 | u8 reserved2[3]; |
1538 | __le64 buf[7]; |
1539 | }; |
1540 | |
1541 | /** |
1542 | * qbman_swp_acquire() - Issue a buffer acquire command |
1543 | * @s: the software portal object |
1544 | * @bpid: the buffer pool index |
 * @buffers: an array to receive the acquired buffer addresses
1546 | * @num_buffers: number of buffers to be acquired, must be less than 8 |
1547 | * |
 * Return the number of buffers acquired on success, or a negative error
 * code if the acquire command fails.
1550 | */ |
1551 | int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers, |
1552 | unsigned int num_buffers) |
1553 | { |
1554 | struct qbman_acquire_desc *p; |
1555 | struct qbman_acquire_rslt *r; |
1556 | int i; |
1557 | |
1558 | if (!num_buffers || (num_buffers > 7)) |
1559 | return -EINVAL; |
1560 | |
1561 | /* Start the management command */ |
	p = qbman_swp_mc_start(s);
1563 | |
1564 | if (!p) |
1565 | return -EBUSY; |
1566 | |
1567 | /* Encode the caller-provided attributes */ |
1568 | p->bpid = cpu_to_le16(bpid); |
1569 | p->num = num_buffers; |
1570 | |
1571 | /* Complete the management command */ |
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1573 | if (unlikely(!r)) { |
1574 | pr_err("qbman: acquire from BPID %d failed, no response\n" , |
1575 | bpid); |
1576 | return -EIO; |
1577 | } |
1578 | |
1579 | /* Decode the outcome */ |
1580 | WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE); |
1581 | |
1582 | /* Determine success or failure */ |
1583 | if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) { |
1584 | pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n" , |
1585 | bpid, r->rslt); |
1586 | return -EIO; |
1587 | } |
1588 | |
1589 | WARN_ON(r->num > num_buffers); |
1590 | |
1591 | /* Copy the acquired buffers to the caller's array */ |
1592 | for (i = 0; i < r->num; i++) |
1593 | buffers[i] = le64_to_cpu(r->buf[i]); |
1594 | |
1595 | return (int)r->num; |
1596 | } |
1597 | |
1598 | struct qbman_alt_fq_state_desc { |
1599 | u8 verb; |
1600 | u8 reserved[3]; |
1601 | __le32 fqid; |
1602 | u8 reserved2[56]; |
1603 | }; |
1604 | |
1605 | struct qbman_alt_fq_state_rslt { |
1606 | u8 verb; |
1607 | u8 rslt; |
1608 | u8 reserved[62]; |
1609 | }; |
1610 | |
1611 | #define ALT_FQ_FQID_MASK 0x00FFFFFF |
1612 | |
1613 | int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid, |
1614 | u8 alt_fq_verb) |
1615 | { |
1616 | struct qbman_alt_fq_state_desc *p; |
1617 | struct qbman_alt_fq_state_rslt *r; |
1618 | |
1619 | /* Start the management command */ |
	p = qbman_swp_mc_start(s);
1621 | if (!p) |
1622 | return -EBUSY; |
1623 | |
1624 | p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK); |
1625 | |
1626 | /* Complete the management command */ |
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1628 | if (unlikely(!r)) { |
1629 | pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n" , |
1630 | alt_fq_verb); |
1631 | return -EIO; |
1632 | } |
1633 | |
1634 | /* Decode the outcome */ |
1635 | WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb); |
1636 | |
1637 | /* Determine success or failure */ |
1638 | if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) { |
1639 | pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n" , |
1640 | fqid, r->verb, r->rslt); |
1641 | return -EIO; |
1642 | } |
1643 | |
1644 | return 0; |
1645 | } |
1646 | |
1647 | struct qbman_cdan_ctrl_desc { |
1648 | u8 verb; |
1649 | u8 reserved; |
1650 | __le16 ch; |
1651 | u8 we; |
1652 | u8 ctrl; |
1653 | __le16 reserved2; |
1654 | __le64 cdan_ctx; |
	u8 reserved3[48];
};
1658 | |
1659 | struct qbman_cdan_ctrl_rslt { |
1660 | u8 verb; |
1661 | u8 rslt; |
1662 | __le16 ch; |
1663 | u8 reserved[60]; |
1664 | }; |
1665 | |
1666 | int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid, |
1667 | u8 we_mask, u8 cdan_en, |
1668 | u64 ctx) |
1669 | { |
1670 | struct qbman_cdan_ctrl_desc *p = NULL; |
1671 | struct qbman_cdan_ctrl_rslt *r = NULL; |
1672 | |
1673 | /* Start the management command */ |
	p = qbman_swp_mc_start(s);
1675 | if (!p) |
1676 | return -EBUSY; |
1677 | |
1678 | /* Encode the caller-provided attributes */ |
1679 | p->ch = cpu_to_le16(channelid); |
1680 | p->we = we_mask; |
1681 | if (cdan_en) |
1682 | p->ctrl = 1; |
1683 | else |
1684 | p->ctrl = 0; |
1685 | p->cdan_ctx = cpu_to_le64(ctx); |
1686 | |
1687 | /* Complete the management command */ |
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1689 | if (unlikely(!r)) { |
1690 | pr_err("qbman: wqchan config failed, no response\n" ); |
1691 | return -EIO; |
1692 | } |
1693 | |
1694 | WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE); |
1695 | |
1696 | /* Determine success or failure */ |
1697 | if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) { |
1698 | pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n" , |
1699 | channelid, r->rslt); |
1700 | return -EIO; |
1701 | } |
1702 | |
1703 | return 0; |
1704 | } |
1705 | |
1706 | #define QBMAN_RESPONSE_VERB_MASK 0x7f |
1707 | #define QBMAN_FQ_QUERY_NP 0x45 |
1708 | #define QBMAN_BP_QUERY 0x32 |
1709 | |
1710 | struct qbman_fq_query_desc { |
1711 | u8 verb; |
1712 | u8 reserved[3]; |
1713 | __le32 fqid; |
1714 | u8 reserved2[56]; |
1715 | }; |
1716 | |
1717 | int qbman_fq_query_state(struct qbman_swp *s, u32 fqid, |
1718 | struct qbman_fq_query_np_rslt *r) |
1719 | { |
1720 | struct qbman_fq_query_desc *p; |
1721 | void *resp; |
1722 | |
	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
1724 | if (!p) |
1725 | return -EBUSY; |
1726 | |
1727 | /* FQID is a 24 bit value */ |
1728 | p->fqid = cpu_to_le32(fqid & 0x00FFFFFF); |
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
1730 | if (!resp) { |
1731 | pr_err("qbman: Query FQID %d NP fields failed, no response\n" , |
1732 | fqid); |
1733 | return -EIO; |
1734 | } |
1735 | *r = *(struct qbman_fq_query_np_rslt *)resp; |
1736 | /* Decode the outcome */ |
1737 | WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP); |
1738 | |
1739 | /* Determine success or failure */ |
1740 | if (r->rslt != QBMAN_MC_RSLT_OK) { |
1741 | pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n" , |
1742 | p->fqid, r->rslt); |
1743 | return -EIO; |
1744 | } |
1745 | |
1746 | return 0; |
1747 | } |
1748 | |
1749 | u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r) |
1750 | { |
1751 | return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF); |
1752 | } |
1753 | |
1754 | u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r) |
1755 | { |
1756 | return le32_to_cpu(r->byte_cnt); |
1757 | } |
1758 | |
1759 | struct qbman_bp_query_desc { |
1760 | u8 verb; |
1761 | u8 reserved; |
1762 | __le16 bpid; |
1763 | u8 reserved2[60]; |
1764 | }; |
1765 | |
1766 | int qbman_bp_query(struct qbman_swp *s, u16 bpid, |
1767 | struct qbman_bp_query_rslt *r) |
1768 | { |
1769 | struct qbman_bp_query_desc *p; |
1770 | void *resp; |
1771 | |
	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
1773 | if (!p) |
1774 | return -EBUSY; |
1775 | |
1776 | p->bpid = cpu_to_le16(bpid); |
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
1778 | if (!resp) { |
1779 | pr_err("qbman: Query BPID %d fields failed, no response\n" , |
1780 | bpid); |
1781 | return -EIO; |
1782 | } |
1783 | *r = *(struct qbman_bp_query_rslt *)resp; |
1784 | /* Decode the outcome */ |
1785 | WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY); |
1786 | |
1787 | /* Determine success or failure */ |
1788 | if (r->rslt != QBMAN_MC_RSLT_OK) { |
1789 | pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n" , |
1790 | bpid, r->rslt); |
1791 | return -EIO; |
1792 | } |
1793 | |
1794 | return 0; |
1795 | } |
1796 | |
1797 | u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a) |
1798 | { |
1799 | return le32_to_cpu(a->fill); |
1800 | } |
1801 | |
1802 | /** |
1803 | * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values |
1804 | * @p: the software portal object |
1805 | * @irq_threshold: interrupt threshold |
1806 | * @irq_holdoff: interrupt holdoff (timeout) period in us |
1807 | * |
1808 | * Return 0 for success, or negative error code on error. |
1809 | */ |
1810 | int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold, |
1811 | u32 irq_holdoff) |
1812 | { |
1813 | u32 itp, max_holdoff; |
1814 | |
	/*
	 * Convert the irq_holdoff value from microseconds to increments
	 * of 256 QBMAN clock cycles; the conversion depends on the QBMAN
	 * internal frequency.
	 */
1818 | itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns; |
1819 | if (itp > 4096) { |
1820 | max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000; |
1821 | pr_err("irq_holdoff must be <= %uus\n" , max_holdoff); |
1822 | return -EINVAL; |
1823 | } |
1824 | |
1825 | if (irq_threshold >= p->dqrr.dqrr_size) { |
1826 | pr_err("irq_threshold must be < %u\n" , p->dqrr.dqrr_size - 1); |
1827 | return -EINVAL; |
1828 | } |
1829 | |
1830 | p->irq_threshold = irq_threshold; |
1831 | p->irq_holdoff = irq_holdoff; |
1832 | |
	qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
	qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
1835 | |
1836 | return 0; |
1837 | } |
1838 | |
1839 | /** |
1840 | * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters |
1841 | * @p: the software portal object |
1842 | * @irq_threshold: interrupt threshold (an IRQ is generated when there are more |
1843 | * DQRR entries in the portal than the threshold) |
1844 | * @irq_holdoff: interrupt holdoff (timeout) period in us |
1845 | */ |
1846 | void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold, |
1847 | u32 *irq_holdoff) |
1848 | { |
1849 | if (irq_threshold) |
1850 | *irq_threshold = p->irq_threshold; |
1851 | if (irq_holdoff) |
1852 | *irq_holdoff = p->irq_holdoff; |
1853 | } |
1854 | |