1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. |
2 | * |
3 | * Redistribution and use in source and binary forms, with or without |
4 | * modification, are permitted provided that the following conditions are met: |
5 | * * Redistributions of source code must retain the above copyright |
6 | * notice, this list of conditions and the following disclaimer. |
7 | * * Redistributions in binary form must reproduce the above copyright |
8 | * notice, this list of conditions and the following disclaimer in the |
9 | * documentation and/or other materials provided with the distribution. |
10 | * * Neither the name of Freescale Semiconductor nor the |
11 | * names of its contributors may be used to endorse or promote products |
12 | * derived from this software without specific prior written permission. |
13 | * |
14 | * ALTERNATIVELY, this software may be distributed under the terms of the |
15 | * GNU General Public License ("GPL") as published by the Free Software |
16 | * Foundation, either version 2 of that License or (at your option) any |
17 | * later version. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY |
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY |
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 | */ |
30 | |
31 | #include "qman_priv.h" |
32 | |
33 | #define DQRR_MAXFILL 15 |
34 | #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ |
35 | #define IRQNAME "QMan portal %d" |
36 | #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ |
37 | #define QMAN_POLL_LIMIT 32 |
38 | #define QMAN_PIRQ_DQRR_ITHRESH 12 |
39 | #define QMAN_DQRR_IT_MAX 15 |
40 | #define QMAN_ITP_MAX 0xFFF |
41 | #define QMAN_PIRQ_MR_ITHRESH 4 |
42 | #define QMAN_PIRQ_IPERIOD 100 |
43 | |
44 | /* Portal register assists */ |
45 | |
46 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) |
47 | /* Cache-inhibited register offsets */ |
48 | #define QM_REG_EQCR_PI_CINH 0x3000 |
49 | #define QM_REG_EQCR_CI_CINH 0x3040 |
50 | #define QM_REG_EQCR_ITR 0x3080 |
51 | #define QM_REG_DQRR_PI_CINH 0x3100 |
52 | #define QM_REG_DQRR_CI_CINH 0x3140 |
53 | #define QM_REG_DQRR_ITR 0x3180 |
54 | #define QM_REG_DQRR_DCAP 0x31C0 |
55 | #define QM_REG_DQRR_SDQCR 0x3200 |
56 | #define QM_REG_DQRR_VDQCR 0x3240 |
57 | #define QM_REG_DQRR_PDQCR 0x3280 |
58 | #define QM_REG_MR_PI_CINH 0x3300 |
59 | #define QM_REG_MR_CI_CINH 0x3340 |
60 | #define QM_REG_MR_ITR 0x3380 |
61 | #define QM_REG_CFG 0x3500 |
62 | #define QM_REG_ISR 0x3600 |
63 | #define QM_REG_IER 0x3640 |
64 | #define QM_REG_ISDR 0x3680 |
65 | #define QM_REG_IIR 0x36C0 |
66 | #define QM_REG_ITPR 0x3740 |
67 | |
68 | /* Cache-enabled register offsets */ |
69 | #define QM_CL_EQCR 0x0000 |
70 | #define QM_CL_DQRR 0x1000 |
71 | #define QM_CL_MR 0x2000 |
72 | #define QM_CL_EQCR_PI_CENA 0x3000 |
73 | #define QM_CL_EQCR_CI_CENA 0x3040 |
74 | #define QM_CL_DQRR_PI_CENA 0x3100 |
75 | #define QM_CL_DQRR_CI_CENA 0x3140 |
76 | #define QM_CL_MR_PI_CENA 0x3300 |
77 | #define QM_CL_MR_CI_CENA 0x3340 |
78 | #define QM_CL_CR 0x3800 |
79 | #define QM_CL_RR0 0x3900 |
80 | #define QM_CL_RR1 0x3940 |
81 | |
82 | #else |
83 | /* Cache-inhibited register offsets */ |
84 | #define QM_REG_EQCR_PI_CINH 0x0000 |
85 | #define QM_REG_EQCR_CI_CINH 0x0004 |
86 | #define QM_REG_EQCR_ITR 0x0008 |
87 | #define QM_REG_DQRR_PI_CINH 0x0040 |
88 | #define QM_REG_DQRR_CI_CINH 0x0044 |
89 | #define QM_REG_DQRR_ITR 0x0048 |
90 | #define QM_REG_DQRR_DCAP 0x0050 |
91 | #define QM_REG_DQRR_SDQCR 0x0054 |
92 | #define QM_REG_DQRR_VDQCR 0x0058 |
93 | #define QM_REG_DQRR_PDQCR 0x005c |
94 | #define QM_REG_MR_PI_CINH 0x0080 |
95 | #define QM_REG_MR_CI_CINH 0x0084 |
96 | #define QM_REG_MR_ITR 0x0088 |
97 | #define QM_REG_CFG 0x0100 |
98 | #define QM_REG_ISR 0x0e00 |
99 | #define QM_REG_IER 0x0e04 |
100 | #define QM_REG_ISDR 0x0e08 |
101 | #define QM_REG_IIR 0x0e0c |
102 | #define QM_REG_ITPR 0x0e14 |
103 | |
104 | /* Cache-enabled register offsets */ |
105 | #define QM_CL_EQCR 0x0000 |
106 | #define QM_CL_DQRR 0x1000 |
107 | #define QM_CL_MR 0x2000 |
108 | #define QM_CL_EQCR_PI_CENA 0x3000 |
109 | #define QM_CL_EQCR_CI_CENA 0x3100 |
110 | #define QM_CL_DQRR_PI_CENA 0x3200 |
111 | #define QM_CL_DQRR_CI_CENA 0x3300 |
112 | #define QM_CL_MR_PI_CENA 0x3400 |
113 | #define QM_CL_MR_CI_CENA 0x3500 |
114 | #define QM_CL_CR 0x3800 |
115 | #define QM_CL_RR0 0x3900 |
116 | #define QM_CL_RR1 0x3940 |
117 | #endif |
118 | |
119 | /* |
120 | * BTW, the drivers (and h/w programming model) already obtain the required |
121 | * synchronisation for portal accesses and data-dependencies. Use of barrier()s |
122 | * or other order-preserving primitives simply degrade performance. Hence the |
123 | * use of the __raw_*() interfaces, which simply ensure that the compiler treats |
124 | * the portal registers as volatile |
125 | */ |
126 | |
127 | /* Cache-enabled ring access */ |
128 | #define qm_cl(base, idx) ((void *)base + ((idx) << 6)) |
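
/*
 * Illustrative expansion (not used by the driver): ring entries are one
 * cacheline (64 bytes) apart, so for a hypothetical ring base 'ring',
 * qm_cl(ring, 3) evaluates to (void *)ring + (3 << 6), i.e. ring + 0xc0,
 * the start of the fourth entry.
 */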
129 | |
130 | /* |
131 | * Portal modes. |
132 | * Enum types; |
133 | * pmode == production mode |
134 | * cmode == consumption mode, |
135 | * dmode == h/w dequeue mode. |
136 | * Enum values use 3 letter codes. First letter matches the portal mode, |
137 | * remaining two letters indicate; |
138 | * ci == cache-inhibited portal register |
139 | * ce == cache-enabled portal register |
140 | * vb == in-band valid-bit (cache-enabled) |
141 | * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only |
142 | * As for "enum qm_dqrr_dmode", it should be self-explanatory. |
143 | */ |
144 | enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ |
145 | qm_eqcr_pci = 0, /* PI index, cache-inhibited */ |
146 | qm_eqcr_pce = 1, /* PI index, cache-enabled */ |
147 | qm_eqcr_pvb = 2 /* valid-bit */ |
148 | }; |
149 | enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ |
150 | qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ |
151 | qm_dqrr_dpull = 1 /* PDQCR */ |
152 | }; |
153 | enum qm_dqrr_pmode { /* s/w-only */ |
154 | qm_dqrr_pci, /* reads DQRR_PI_CINH */ |
155 | qm_dqrr_pce, /* reads DQRR_PI_CENA */ |
156 | qm_dqrr_pvb /* reads valid-bit */ |
157 | }; |
158 | enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ |
159 | qm_dqrr_cci = 0, /* CI index, cache-inhibited */ |
160 | qm_dqrr_cce = 1, /* CI index, cache-enabled */ |
161 | qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */ |
162 | }; |
163 | enum qm_mr_pmode { /* s/w-only */ |
164 | qm_mr_pci, /* reads MR_PI_CINH */ |
165 | qm_mr_pce, /* reads MR_PI_CENA */ |
166 | qm_mr_pvb /* reads valid-bit */ |
167 | }; |
168 | enum qm_mr_cmode { /* matches QCSP_CFG::MM */ |
169 | qm_mr_cci = 0, /* CI index, cache-inhibited */ |
170 | qm_mr_cce = 1 /* CI index, cache-enabled */ |
171 | }; |
172 | |
173 | /* --- Portal structures --- */ |
174 | |
175 | #define QM_EQCR_SIZE 8 |
176 | #define QM_DQRR_SIZE 16 |
177 | #define QM_MR_SIZE 8 |
178 | |
179 | /* "Enqueue Command" */ |
180 | struct qm_eqcr_entry { |
181 | u8 _ncw_verb; /* writes to this are non-coherent */ |
182 | u8 dca; |
183 | __be16 seqnum; |
184 | u8 __reserved[4]; |
185 | __be32 fqid; /* 24-bit */ |
186 | __be32 tag; |
187 | struct qm_fd fd; |
188 | u8 __reserved3[32]; |
189 | } __packed __aligned(8); |
190 | #define QM_EQCR_VERB_VBIT 0x80 |
#define QM_EQCR_VERB_CMD_MASK 0x61 /* only one command value is defined: */
192 | #define QM_EQCR_VERB_CMD_ENQUEUE 0x01 |
193 | #define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */ |
194 | #define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */ |
195 | #define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */ |
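
/*
 * Illustrative note: qm_eqcr_pvb_commit() below writes the verb byte as
 * "myverb | eqcr->vbit", so a committed enqueue entry's verb is, for
 * example, QM_EQCR_VERB_CMD_ENQUEUE | QM_EQCR_VERB_VBIT (0x81) on laps
 * where the ring's valid-bit polarity is set.
 */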
196 | |
197 | struct qm_eqcr { |
198 | struct qm_eqcr_entry *ring, *cursor; |
199 | u8 ci, available, ithresh, vbit; |
200 | #ifdef CONFIG_FSL_DPAA_CHECKING |
201 | u32 busy; |
202 | enum qm_eqcr_pmode pmode; |
203 | #endif |
204 | }; |
205 | |
206 | struct qm_dqrr { |
207 | const struct qm_dqrr_entry *ring, *cursor; |
208 | u8 pi, ci, fill, ithresh, vbit; |
209 | #ifdef CONFIG_FSL_DPAA_CHECKING |
210 | enum qm_dqrr_dmode dmode; |
211 | enum qm_dqrr_pmode pmode; |
212 | enum qm_dqrr_cmode cmode; |
213 | #endif |
214 | }; |
215 | |
216 | struct qm_mr { |
217 | union qm_mr_entry *ring, *cursor; |
218 | u8 pi, ci, fill, ithresh, vbit; |
219 | #ifdef CONFIG_FSL_DPAA_CHECKING |
220 | enum qm_mr_pmode pmode; |
221 | enum qm_mr_cmode cmode; |
222 | #endif |
223 | }; |
224 | |
225 | /* MC (Management Command) command */ |
226 | /* "FQ" command layout */ |
227 | struct qm_mcc_fq { |
228 | u8 _ncw_verb; |
229 | u8 __reserved1[3]; |
230 | __be32 fqid; /* 24-bit */ |
231 | u8 __reserved2[56]; |
232 | } __packed; |
233 | |
234 | /* "CGR" command layout */ |
235 | struct qm_mcc_cgr { |
236 | u8 _ncw_verb; |
237 | u8 __reserved1[30]; |
238 | u8 cgid; |
239 | u8 __reserved2[32]; |
240 | }; |
241 | |
242 | #define QM_MCC_VERB_VBIT 0x80 |
#define QM_MCC_VERB_MASK 0x7f /* where the verb is one of: */
244 | #define QM_MCC_VERB_INITFQ_PARKED 0x40 |
245 | #define QM_MCC_VERB_INITFQ_SCHED 0x41 |
246 | #define QM_MCC_VERB_QUERYFQ 0x44 |
247 | #define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */ |
248 | #define QM_MCC_VERB_QUERYWQ 0x46 |
249 | #define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47 |
250 | #define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */ |
251 | #define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */ |
252 | #define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */ |
253 | #define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */ |
254 | #define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */ |
255 | #define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */ |
256 | #define QM_MCC_VERB_INITCGR 0x50 |
257 | #define QM_MCC_VERB_MODIFYCGR 0x51 |
258 | #define QM_MCC_VERB_CGRTESTWRITE 0x52 |
259 | #define QM_MCC_VERB_QUERYCGR 0x58 |
260 | #define QM_MCC_VERB_QUERYCONGESTION 0x59 |
261 | union qm_mc_command { |
262 | struct { |
263 | u8 _ncw_verb; /* writes to this are non-coherent */ |
264 | u8 __reserved[63]; |
265 | }; |
266 | struct qm_mcc_initfq initfq; |
267 | struct qm_mcc_initcgr initcgr; |
268 | struct qm_mcc_fq fq; |
269 | struct qm_mcc_cgr cgr; |
270 | }; |
271 | |
272 | /* MC (Management Command) result */ |
273 | /* "Query FQ" */ |
274 | struct qm_mcr_queryfq { |
275 | u8 verb; |
276 | u8 result; |
277 | u8 __reserved1[8]; |
278 | struct qm_fqd fqd; /* the FQD fields are here */ |
279 | u8 __reserved2[30]; |
280 | } __packed; |
281 | |
282 | /* "Alter FQ State Commands" */ |
283 | struct qm_mcr_alterfq { |
284 | u8 verb; |
285 | u8 result; |
286 | u8 fqs; /* Frame Queue Status */ |
287 | u8 __reserved1[61]; |
288 | }; |
289 | #define QM_MCR_VERB_RRID 0x80 |
290 | #define QM_MCR_VERB_MASK QM_MCC_VERB_MASK |
291 | #define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED |
292 | #define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED |
293 | #define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ |
294 | #define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP |
295 | #define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ |
296 | #define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED |
297 | #define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED |
298 | #define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE |
299 | #define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE |
300 | #define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS |
301 | #define QM_MCR_RESULT_NULL 0x00 |
302 | #define QM_MCR_RESULT_OK 0xf0 |
303 | #define QM_MCR_RESULT_ERR_FQID 0xf1 |
304 | #define QM_MCR_RESULT_ERR_FQSTATE 0xf2 |
305 | #define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */ |
306 | #define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4 |
307 | #define QM_MCR_RESULT_PENDING 0xf8 |
308 | #define QM_MCR_RESULT_ERR_BADCOMMAND 0xff |
309 | #define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ |
310 | #define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ |
311 | #define QM_MCR_TIMEOUT 10000 /* us */ |
312 | union qm_mc_result { |
313 | struct { |
314 | u8 verb; |
315 | u8 result; |
316 | u8 __reserved1[62]; |
317 | }; |
318 | struct qm_mcr_queryfq queryfq; |
319 | struct qm_mcr_alterfq alterfq; |
320 | struct qm_mcr_querycgr querycgr; |
321 | struct qm_mcr_querycongestion querycongestion; |
322 | struct qm_mcr_querywq querywq; |
323 | struct qm_mcr_queryfq_np queryfq_np; |
324 | }; |
325 | |
326 | struct qm_mc { |
327 | union qm_mc_command *cr; |
328 | union qm_mc_result *rr; |
329 | u8 rridx, vbit; |
330 | #ifdef CONFIG_FSL_DPAA_CHECKING |
331 | enum { |
332 | /* Can be _mc_start()ed */ |
333 | qman_mc_idle, |
334 | /* Can be _mc_commit()ed or _mc_abort()ed */ |
335 | qman_mc_user, |
336 | /* Can only be _mc_retry()ed */ |
337 | qman_mc_hw |
338 | } state; |
339 | #endif |
340 | }; |
341 | |
342 | struct qm_addr { |
343 | void *ce; /* cache-enabled */ |
344 | __be32 *ce_be; /* same value as above but for direct access */ |
345 | void __iomem *ci; /* cache-inhibited */ |
346 | }; |
347 | |
348 | struct qm_portal { |
349 | /* |
350 | * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to |
351 | * and including 'mc' fits within a cacheline (yay!). The 'config' part |
352 | * is setup-only, so isn't a cause for a concern. In other words, don't |
353 | * rearrange this structure on a whim, there be dragons ... |
354 | */ |
355 | struct qm_addr addr; |
356 | struct qm_eqcr eqcr; |
357 | struct qm_dqrr dqrr; |
358 | struct qm_mr mr; |
359 | struct qm_mc mc; |
360 | } ____cacheline_aligned; |
361 | |
362 | /* Cache-inhibited register access. */ |
363 | static inline u32 qm_in(struct qm_portal *p, u32 offset) |
364 | { |
365 | return ioread32be(p->addr.ci + offset); |
366 | } |
367 | |
368 | static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) |
369 | { |
370 | iowrite32be(val, p->addr.ci + offset); |
371 | } |
372 | |
373 | /* Cache Enabled Portal Access */ |
374 | static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset) |
375 | { |
376 | dpaa_invalidate(p->addr.ce + offset); |
377 | } |
378 | |
379 | static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) |
380 | { |
	dpaa_touch_ro(p->addr.ce + offset);
382 | } |
383 | |
384 | static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) |
385 | { |
386 | return be32_to_cpu(*(p->addr.ce_be + (offset/4))); |
387 | } |
388 | |
389 | /* --- EQCR API --- */ |
390 | |
391 | #define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry)) |
392 | #define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT) |
393 | |
394 | /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ |
395 | static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p) |
396 | { |
397 | uintptr_t addr = (uintptr_t)p; |
398 | |
399 | addr &= ~EQCR_CARRY; |
400 | |
401 | return (struct qm_eqcr_entry *)addr; |
402 | } |
403 | |
404 | /* Bit-wise logic to convert a ring pointer to a ring index */ |
405 | static int eqcr_ptr2idx(struct qm_eqcr_entry *e) |
406 | { |
407 | return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1); |
408 | } |
409 | |
410 | /* Increment the 'cursor' ring pointer, taking 'vbit' into account */ |
411 | static inline void eqcr_inc(struct qm_eqcr *eqcr) |
412 | { |
413 | /* increment to the next EQCR pointer and handle overflow and 'vbit' */ |
414 | struct qm_eqcr_entry *partial = eqcr->cursor + 1; |
415 | |
	eqcr->cursor = eqcr_carryclear(partial);
417 | if (partial != eqcr->cursor) |
418 | eqcr->vbit ^= QM_EQCR_VERB_VBIT; |
419 | } |
420 | |
421 | static inline int qm_eqcr_init(struct qm_portal *portal, |
422 | enum qm_eqcr_pmode pmode, |
423 | unsigned int eq_stash_thresh, |
424 | int eq_stash_prio) |
425 | { |
426 | struct qm_eqcr *eqcr = &portal->eqcr; |
427 | u32 cfg; |
428 | u8 pi; |
429 | |
430 | eqcr->ring = portal->addr.ce + QM_CL_EQCR; |
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
440 | #ifdef CONFIG_FSL_DPAA_CHECKING |
441 | eqcr->busy = 0; |
442 | eqcr->pmode = pmode; |
443 | #endif |
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
	      (eq_stash_prio << 26) | /* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
449 | return 0; |
450 | } |
451 | |
452 | static inline void qm_eqcr_finish(struct qm_portal *portal) |
453 | { |
454 | struct qm_eqcr *eqcr = &portal->eqcr; |
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
465 | } |
466 | |
467 | static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal |
468 | *portal) |
469 | { |
470 | struct qm_eqcr *eqcr = &portal->eqcr; |
471 | |
472 | DPAA_ASSERT(!eqcr->busy); |
473 | if (!eqcr->available) |
474 | return NULL; |
475 | |
476 | #ifdef CONFIG_FSL_DPAA_CHECKING |
477 | eqcr->busy = 1; |
478 | #endif |
479 | dpaa_zero(eqcr->cursor); |
480 | return eqcr->cursor; |
481 | } |
482 | |
483 | static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal |
484 | *portal) |
485 | { |
486 | struct qm_eqcr *eqcr = &portal->eqcr; |
487 | u8 diff, old_ci; |
488 | |
489 | DPAA_ASSERT(!eqcr->busy); |
490 | if (!eqcr->available) { |
491 | old_ci = eqcr->ci; |
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
495 | eqcr->available += diff; |
496 | if (!diff) |
497 | return NULL; |
498 | } |
499 | #ifdef CONFIG_FSL_DPAA_CHECKING |
500 | eqcr->busy = 1; |
501 | #endif |
502 | dpaa_zero(eqcr->cursor); |
503 | return eqcr->cursor; |
504 | } |
505 | |
506 | static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) |
507 | { |
508 | DPAA_ASSERT(eqcr->busy); |
509 | DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK)); |
510 | DPAA_ASSERT(eqcr->available >= 1); |
511 | } |
512 | |
513 | static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) |
514 | { |
515 | struct qm_eqcr *eqcr = &portal->eqcr; |
516 | struct qm_eqcr_entry *eqcursor; |
517 | |
518 | eqcr_commit_checks(eqcr); |
519 | DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb); |
520 | dma_wmb(); |
521 | eqcursor = eqcr->cursor; |
522 | eqcursor->_ncw_verb = myverb | eqcr->vbit; |
	dpaa_flush(eqcursor);
524 | eqcr_inc(eqcr); |
525 | eqcr->available--; |
526 | #ifdef CONFIG_FSL_DPAA_CHECKING |
527 | eqcr->busy = 0; |
528 | #endif |
529 | } |
530 | |
531 | static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) |
532 | { |
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
534 | } |
535 | |
536 | static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) |
537 | { |
538 | struct qm_eqcr *eqcr = &portal->eqcr; |
539 | u8 diff, old_ci = eqcr->ci; |
540 | |
	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
544 | eqcr->available += diff; |
545 | return diff; |
546 | } |
547 | |
548 | static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) |
549 | { |
550 | struct qm_eqcr *eqcr = &portal->eqcr; |
551 | |
552 | eqcr->ithresh = ithresh; |
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
554 | } |
555 | |
556 | static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) |
557 | { |
558 | struct qm_eqcr *eqcr = &portal->eqcr; |
559 | |
560 | return eqcr->available; |
561 | } |
562 | |
563 | static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) |
564 | { |
565 | struct qm_eqcr *eqcr = &portal->eqcr; |
566 | |
567 | return QM_EQCR_SIZE - 1 - eqcr->available; |
568 | } |
569 | |
570 | /* --- DQRR API --- */ |
571 | |
572 | #define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry)) |
573 | #define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT) |
574 | |
575 | static const struct qm_dqrr_entry *dqrr_carryclear( |
576 | const struct qm_dqrr_entry *p) |
577 | { |
578 | uintptr_t addr = (uintptr_t)p; |
579 | |
580 | addr &= ~DQRR_CARRY; |
581 | |
582 | return (const struct qm_dqrr_entry *)addr; |
583 | } |
584 | |
585 | static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e) |
586 | { |
587 | return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1); |
588 | } |
589 | |
590 | static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e) |
591 | { |
	return dqrr_carryclear(e + 1);
593 | } |
594 | |
595 | static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) |
596 | { |
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
599 | } |
600 | |
601 | static inline int qm_dqrr_init(struct qm_portal *portal, |
602 | const struct qm_portal_config *config, |
603 | enum qm_dqrr_dmode dmode, |
604 | enum qm_dqrr_pmode pmode, |
605 | enum qm_dqrr_cmode cmode, u8 max_fill) |
606 | { |
607 | struct qm_dqrr *dqrr = &portal->dqrr; |
608 | u32 cfg; |
609 | |
610 | /* Make sure the DQRR will be idle when we enable */ |
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
622 | #ifdef CONFIG_FSL_DPAA_CHECKING |
623 | dqrr->dmode = dmode; |
624 | dqrr->pmode = pmode; |
625 | dqrr->cmode = cmode; |
626 | #endif |
627 | /* Invalidate every ring entry before beginning */ |
628 | for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) |
629 | dpaa_invalidate(qm_cl(dqrr->ring, cfg)); |
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
	      ((dmode & 1) << 18) | /* DP */
	      ((cmode & 3) << 16) | /* DCM */
	      0xa0 | /* RE+SE */
	      (0 ? 0x40 : 0) | /* Ignore RP */
	      (0 ? 0x10 : 0); /* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
639 | return 0; |
640 | } |
641 | |
642 | static inline void qm_dqrr_finish(struct qm_portal *portal) |
643 | { |
644 | #ifdef CONFIG_FSL_DPAA_CHECKING |
645 | struct qm_dqrr *dqrr = &portal->dqrr; |
646 | |
647 | if (dqrr->cmode != qm_dqrr_cdc && |
648 | dqrr->ci != dqrr_ptr2idx(dqrr->cursor)) |
649 | pr_crit("Ignoring completed DQRR entries\n" ); |
650 | #endif |
651 | } |
652 | |
653 | static inline const struct qm_dqrr_entry *qm_dqrr_current( |
654 | struct qm_portal *portal) |
655 | { |
656 | struct qm_dqrr *dqrr = &portal->dqrr; |
657 | |
658 | if (!dqrr->fill) |
659 | return NULL; |
660 | return dqrr->cursor; |
661 | } |
662 | |
663 | static inline u8 qm_dqrr_next(struct qm_portal *portal) |
664 | { |
665 | struct qm_dqrr *dqrr = &portal->dqrr; |
666 | |
667 | DPAA_ASSERT(dqrr->fill); |
	dqrr->cursor = dqrr_inc(dqrr->cursor);
669 | return --dqrr->fill; |
670 | } |
671 | |
672 | static inline void qm_dqrr_pvb_update(struct qm_portal *portal) |
673 | { |
674 | struct qm_dqrr *dqrr = &portal->dqrr; |
675 | struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); |
676 | |
677 | DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb); |
678 | #ifndef CONFIG_FSL_PAMU |
679 | /* |
680 | * If PAMU is not available we need to invalidate the cache. |
681 | * When PAMU is available the cache is updated by stash |
682 | */ |
	dpaa_invalidate_touch_ro(res);
684 | #endif |
685 | if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) { |
686 | dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); |
687 | if (!dqrr->pi) |
688 | dqrr->vbit ^= QM_DQRR_VERB_VBIT; |
689 | dqrr->fill++; |
690 | } |
691 | } |
692 | |
693 | static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, |
694 | const struct qm_dqrr_entry *dq, |
695 | int park) |
696 | { |
697 | __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; |
	int idx = dqrr_ptr2idx(dq);
699 | |
700 | DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); |
701 | DPAA_ASSERT((dqrr->ring + idx) == dq); |
702 | DPAA_ASSERT(idx < QM_DQRR_SIZE); |
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
704 | ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */ |
705 | idx); /* DQRR_DCAP::DCAP_CI */ |
706 | } |
707 | |
708 | static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask) |
709 | { |
710 | __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; |
711 | |
712 | DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); |
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
714 | (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ |
715 | } |
716 | |
717 | static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) |
718 | { |
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
720 | } |
721 | |
722 | static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) |
723 | { |
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
725 | } |
726 | |
727 | static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) |
728 | { |
729 | |
730 | if (ithresh > QMAN_DQRR_IT_MAX) |
731 | return -EINVAL; |
732 | |
733 | qm_out(p: portal, QM_REG_DQRR_ITR, val: ithresh); |
734 | |
735 | return 0; |
736 | } |
737 | |
738 | /* --- MR API --- */ |
739 | |
740 | #define MR_SHIFT ilog2(sizeof(union qm_mr_entry)) |
741 | #define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT) |
742 | |
743 | static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p) |
744 | { |
745 | uintptr_t addr = (uintptr_t)p; |
746 | |
747 | addr &= ~MR_CARRY; |
748 | |
749 | return (union qm_mr_entry *)addr; |
750 | } |
751 | |
752 | static inline int mr_ptr2idx(const union qm_mr_entry *e) |
753 | { |
754 | return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1); |
755 | } |
756 | |
757 | static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e) |
758 | { |
	return mr_carryclear(e + 1);
760 | } |
761 | |
762 | static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, |
763 | enum qm_mr_cmode cmode) |
764 | { |
765 | struct qm_mr *mr = &portal->mr; |
766 | u32 cfg; |
767 | |
768 | mr->ring = portal->addr.ce + QM_CL_MR; |
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
776 | #ifdef CONFIG_FSL_DPAA_CHECKING |
777 | mr->pmode = pmode; |
778 | mr->cmode = cmode; |
779 | #endif |
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8); /* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
783 | return 0; |
784 | } |
785 | |
786 | static inline void qm_mr_finish(struct qm_portal *portal) |
787 | { |
788 | struct qm_mr *mr = &portal->mr; |
789 | |
	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
792 | } |
793 | |
794 | static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal) |
795 | { |
796 | struct qm_mr *mr = &portal->mr; |
797 | |
798 | if (!mr->fill) |
799 | return NULL; |
800 | return mr->cursor; |
801 | } |
802 | |
803 | static inline int qm_mr_next(struct qm_portal *portal) |
804 | { |
805 | struct qm_mr *mr = &portal->mr; |
806 | |
807 | DPAA_ASSERT(mr->fill); |
	mr->cursor = mr_inc(mr->cursor);
809 | return --mr->fill; |
810 | } |
811 | |
812 | static inline void qm_mr_pvb_update(struct qm_portal *portal) |
813 | { |
814 | struct qm_mr *mr = &portal->mr; |
815 | union qm_mr_entry *res = qm_cl(mr->ring, mr->pi); |
816 | |
817 | DPAA_ASSERT(mr->pmode == qm_mr_pvb); |
818 | |
819 | if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) { |
820 | mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); |
821 | if (!mr->pi) |
822 | mr->vbit ^= QM_MR_VERB_VBIT; |
823 | mr->fill++; |
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
827 | } |
828 | |
829 | static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) |
830 | { |
831 | struct qm_mr *mr = &portal->mr; |
832 | |
833 | DPAA_ASSERT(mr->cmode == qm_mr_cci); |
834 | mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); |
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
836 | } |
837 | |
838 | static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) |
839 | { |
840 | struct qm_mr *mr = &portal->mr; |
841 | |
842 | DPAA_ASSERT(mr->cmode == qm_mr_cci); |
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
845 | } |
846 | |
847 | static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) |
848 | { |
	qm_out(portal, QM_REG_MR_ITR, ithresh);
850 | } |
851 | |
852 | /* --- Management command API --- */ |
853 | |
854 | static inline int qm_mc_init(struct qm_portal *portal) |
855 | { |
856 | u8 rr0, rr1; |
857 | struct qm_mc *mc = &portal->mc; |
858 | |
859 | mc->cr = portal->addr.ce + QM_CL_CR; |
860 | mc->rr = portal->addr.ce + QM_CL_RR0; |
861 | /* |
862 | * The expected valid bit polarity for the next CR command is 0 |
863 | * if RR1 contains a valid response, and is 1 if RR0 contains a |
864 | * valid response. If both RR contain all 0, this indicates either |
865 | * that no command has been executed since reset (in which case the |
866 | * expected valid bit polarity is 1) |
867 | */ |
868 | rr0 = mc->rr->verb; |
869 | rr1 = (mc->rr+1)->verb; |
870 | if ((rr0 == 0 && rr1 == 0) || rr0 != 0) |
871 | mc->rridx = 1; |
872 | else |
873 | mc->rridx = 0; |
874 | mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; |
875 | #ifdef CONFIG_FSL_DPAA_CHECKING |
876 | mc->state = qman_mc_idle; |
877 | #endif |
878 | return 0; |
879 | } |
880 | |
881 | static inline void qm_mc_finish(struct qm_portal *portal) |
882 | { |
883 | #ifdef CONFIG_FSL_DPAA_CHECKING |
884 | struct qm_mc *mc = &portal->mc; |
885 | |
886 | DPAA_ASSERT(mc->state == qman_mc_idle); |
887 | if (mc->state != qman_mc_idle) |
888 | pr_crit("Losing incomplete MC command\n" ); |
889 | #endif |
890 | } |
891 | |
892 | static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal) |
893 | { |
894 | struct qm_mc *mc = &portal->mc; |
895 | |
896 | DPAA_ASSERT(mc->state == qman_mc_idle); |
897 | #ifdef CONFIG_FSL_DPAA_CHECKING |
898 | mc->state = qman_mc_user; |
899 | #endif |
900 | dpaa_zero(mc->cr); |
901 | return mc->cr; |
902 | } |
903 | |
904 | static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) |
905 | { |
906 | struct qm_mc *mc = &portal->mc; |
907 | union qm_mc_result *rr = mc->rr + mc->rridx; |
908 | |
909 | DPAA_ASSERT(mc->state == qman_mc_user); |
910 | dma_wmb(); |
911 | mc->cr->_ncw_verb = myverb | mc->vbit; |
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
914 | #ifdef CONFIG_FSL_DPAA_CHECKING |
915 | mc->state = qman_mc_hw; |
916 | #endif |
917 | } |
918 | |
919 | static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal) |
920 | { |
921 | struct qm_mc *mc = &portal->mc; |
922 | union qm_mc_result *rr = mc->rr + mc->rridx; |
923 | |
924 | DPAA_ASSERT(mc->state == qman_mc_hw); |
925 | /* |
926 | * The inactive response register's verb byte always returns zero until |
927 | * its command is submitted and completed. This includes the valid-bit, |
928 | * in case you were wondering... |
929 | */ |
930 | if (!rr->verb) { |
		dpaa_invalidate_touch_ro(rr);
932 | return NULL; |
933 | } |
934 | mc->rridx ^= 1; |
935 | mc->vbit ^= QM_MCC_VERB_VBIT; |
936 | #ifdef CONFIG_FSL_DPAA_CHECKING |
937 | mc->state = qman_mc_idle; |
938 | #endif |
939 | return rr; |
940 | } |
941 | |
942 | static inline int qm_mc_result_timeout(struct qm_portal *portal, |
943 | union qm_mc_result **mcr) |
944 | { |
945 | int timeout = QM_MCR_TIMEOUT; |
946 | |
947 | do { |
948 | *mcr = qm_mc_result(portal); |
949 | if (*mcr) |
950 | break; |
951 | udelay(1); |
952 | } while (--timeout); |
953 | |
954 | return timeout; |
955 | } |
956 | |
957 | static inline void fq_set(struct qman_fq *fq, u32 mask) |
958 | { |
959 | fq->flags |= mask; |
960 | } |
961 | |
962 | static inline void fq_clear(struct qman_fq *fq, u32 mask) |
963 | { |
964 | fq->flags &= ~mask; |
965 | } |
966 | |
967 | static inline int fq_isset(struct qman_fq *fq, u32 mask) |
968 | { |
969 | return fq->flags & mask; |
970 | } |
971 | |
972 | static inline int fq_isclear(struct qman_fq *fq, u32 mask) |
973 | { |
974 | return !(fq->flags & mask); |
975 | } |
976 | |
977 | struct qman_portal { |
978 | struct qm_portal p; |
979 | /* PORTAL_BITS_*** - dynamic, strictly internal */ |
980 | unsigned long bits; |
981 | /* interrupt sources processed by portal_isr(), configurable */ |
982 | unsigned long irq_sources; |
983 | u32 use_eqcr_ci_stashing; |
984 | /* only 1 volatile dequeue at a time */ |
985 | struct qman_fq *vdqcr_owned; |
986 | u32 sdqcr; |
987 | /* probing time config params for cpu-affine portals */ |
988 | const struct qm_portal_config *config; |
989 | /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ |
990 | struct qman_cgrs *cgrs; |
991 | /* linked-list of CSCN handlers. */ |
992 | struct list_head cgr_cbs; |
993 | /* list lock */ |
994 | raw_spinlock_t cgr_lock; |
995 | struct work_struct congestion_work; |
996 | struct work_struct mr_work; |
997 | char irqname[MAX_IRQNAME]; |
998 | }; |
999 | |
1000 | static cpumask_t affine_mask; |
1001 | static DEFINE_SPINLOCK(affine_mask_lock); |
1002 | static u16 affine_channels[NR_CPUS]; |
1003 | static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); |
1004 | struct qman_portal *affine_portals[NR_CPUS]; |
1005 | |
1006 | static inline struct qman_portal *get_affine_portal(void) |
1007 | { |
1008 | return &get_cpu_var(qman_affine_portal); |
1009 | } |
1010 | |
1011 | static inline void put_affine_portal(void) |
1012 | { |
1013 | put_cpu_var(qman_affine_portal); |
1014 | } |
1015 | |
1016 | |
1017 | static inline struct qman_portal *get_portal_for_channel(u16 channel) |
1018 | { |
1019 | int i; |
1020 | |
1021 | for (i = 0; i < num_possible_cpus(); i++) { |
1022 | if (affine_portals[i] && |
1023 | affine_portals[i]->config->channel == channel) |
1024 | return affine_portals[i]; |
1025 | } |
1026 | |
1027 | return NULL; |
1028 | } |
1029 | |
1030 | static struct workqueue_struct *qm_portal_wq; |
1031 | |
1032 | int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh) |
1033 | { |
1034 | int res; |
1035 | |
1036 | if (!portal) |
1037 | return -EINVAL; |
1038 | |
	res = qm_dqrr_set_ithresh(&portal->p, ithresh);
1040 | if (res) |
1041 | return res; |
1042 | |
1043 | portal->p.dqrr.ithresh = ithresh; |
1044 | |
1045 | return 0; |
1046 | } |
1047 | EXPORT_SYMBOL(qman_dqrr_set_ithresh); |
1048 | |
1049 | void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh) |
1050 | { |
1051 | if (portal && ithresh) |
		*ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
1053 | } |
1054 | EXPORT_SYMBOL(qman_dqrr_get_ithresh); |
1055 | |
1056 | void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod) |
1057 | { |
1058 | if (portal && iperiod) |
		*iperiod = qm_in(&portal->p, QM_REG_ITPR);
1060 | } |
1061 | EXPORT_SYMBOL(qman_portal_get_iperiod); |
1062 | |
1063 | int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod) |
1064 | { |
1065 | if (!portal || iperiod > QMAN_ITP_MAX) |
1066 | return -EINVAL; |
1067 | |
	qm_out(&portal->p, QM_REG_ITPR, iperiod);
1069 | |
1070 | return 0; |
1071 | } |
1072 | EXPORT_SYMBOL(qman_portal_set_iperiod); |
1073 | |
1074 | int qman_wq_alloc(void) |
1075 | { |
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
1077 | if (!qm_portal_wq) |
1078 | return -ENOMEM; |
1079 | return 0; |
1080 | } |
1081 | |
1082 | |
1083 | void qman_enable_irqs(void) |
1084 | { |
1085 | int i; |
1086 | |
1087 | for (i = 0; i < num_possible_cpus(); i++) { |
1088 | if (affine_portals[i]) { |
			qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
			qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
		}
	}
1094 | } |
1095 | |
1096 | /* |
1097 | * This is what everything can wait on, even if it migrates to a different cpu |
1098 | * to the one whose affine portal it is waiting on. |
1099 | */ |
1100 | static DECLARE_WAIT_QUEUE_HEAD(affine_queue); |
1101 | |
1102 | static struct qman_fq **fq_table; |
1103 | static u32 num_fqids; |
1104 | |
1105 | int qman_alloc_fq_table(u32 _num_fqids) |
1106 | { |
1107 | num_fqids = _num_fqids; |
1108 | |
1109 | fq_table = vzalloc(array3_size(sizeof(struct qman_fq *), |
1110 | num_fqids, 2)); |
1111 | if (!fq_table) |
1112 | return -ENOMEM; |
1113 | |
1114 | pr_debug("Allocated fq lookup table at %p, entry count %u\n" , |
1115 | fq_table, num_fqids * 2); |
1116 | return 0; |
1117 | } |
1118 | |
1119 | static struct qman_fq *idx_to_fq(u32 idx) |
1120 | { |
1121 | struct qman_fq *fq; |
1122 | |
1123 | #ifdef CONFIG_FSL_DPAA_CHECKING |
1124 | if (WARN_ON(idx >= num_fqids * 2)) |
1125 | return NULL; |
1126 | #endif |
1127 | fq = fq_table[idx]; |
1128 | DPAA_ASSERT(!fq || idx == fq->idx); |
1129 | |
1130 | return fq; |
1131 | } |
1132 | |
1133 | /* |
1134 | * Only returns full-service fq objects, not enqueue-only |
1135 | * references (QMAN_FQ_FLAG_NO_MODIFY). |
1136 | */ |
1137 | static struct qman_fq *fqid_to_fq(u32 fqid) |
1138 | { |
	return idx_to_fq(fqid * 2);
1140 | } |
1141 | |
1142 | static struct qman_fq *tag_to_fq(u32 tag) |
1143 | { |
1144 | #if BITS_PER_LONG == 64 |
	return idx_to_fq(tag);
1146 | #else |
1147 | return (struct qman_fq *)tag; |
1148 | #endif |
1149 | } |
1150 | |
1151 | static u32 fq_to_tag(struct qman_fq *fq) |
1152 | { |
1153 | #if BITS_PER_LONG == 64 |
1154 | return fq->idx; |
1155 | #else |
1156 | return (u32)fq; |
1157 | #endif |
1158 | } |
1159 | |
1160 | static u32 __poll_portal_slow(struct qman_portal *p, u32 is); |
1161 | static inline unsigned int __poll_portal_fast(struct qman_portal *p, |
1162 | unsigned int poll_limit, bool sched_napi); |
1163 | static void qm_congestion_task(struct work_struct *work); |
1164 | static void qm_mr_process_task(struct work_struct *work); |
1165 | |
1166 | static irqreturn_t portal_isr(int irq, void *ptr) |
1167 | { |
1168 | struct qman_portal *p = ptr; |
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
1170 | u32 clear = 0; |
1171 | |
1172 | if (unlikely(!is)) |
1173 | return IRQ_NONE; |
1174 | |
1175 | /* DQRR-handling if it's interrupt-driven */ |
1176 | if (is & QM_PIRQ_DQRI) { |
		__poll_portal_fast(p, QMAN_POLL_LIMIT, true);
1178 | clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; |
1179 | } |
1180 | /* Handling of anything else that's interrupt-driven */ |
1181 | clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; |
	qm_out(&p->p, QM_REG_ISR, clear);
1183 | return IRQ_HANDLED; |
1184 | } |
1185 | |
1186 | static int drain_mr_fqrni(struct qm_portal *p) |
1187 | { |
1188 | const union qm_mr_entry *msg; |
1189 | loop: |
	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
1192 | if (!msg) { |
1193 | /* |
1194 | * if MR was full and h/w had other FQRNI entries to produce, we |
1195 | * need to allow it time to produce those entries once the |
1196 | * existing entries are consumed. A worst-case situation |
1197 | * (fully-loaded system) means h/w sequencers may have to do 3-4 |
1198 | * other things before servicing the portal's MR pump, each of |
1199 | * which (if slow) may take ~50 qman cycles (which is ~200 |
1200 | * processor cycles). So rounding up and then multiplying this |
1201 | * worst-case estimate by a factor of 10, just to be |
1202 | * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume |
1203 | * one entry at a time, so h/w has an opportunity to produce new |
1204 | * entries well before the ring has been fully consumed, so |
1205 | * we're being *really* paranoid here. |
1206 | */ |
1207 | mdelay(1); |
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
1210 | if (!msg) |
1211 | return 0; |
1212 | } |
1213 | if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { |
1214 | /* We aren't draining anything but FQRNIs */ |
1215 | pr_err("Found verb 0x%x in MR\n" , msg->verb); |
1216 | return -1; |
1217 | } |
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
1220 | goto loop; |
1221 | } |
1222 | |
1223 | static int qman_create_portal(struct qman_portal *portal, |
1224 | const struct qm_portal_config *c, |
1225 | const struct qman_cgrs *cgrs) |
1226 | { |
1227 | struct qm_portal *p; |
1228 | int ret; |
1229 | u32 isdr; |
1230 | |
1231 | p = &portal->p; |
1232 | |
1233 | #ifdef CONFIG_FSL_PAMU |
1234 | /* PAMU is required for stashing */ |
1235 | portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0); |
1236 | #else |
1237 | portal->use_eqcr_ci_stashing = 0; |
1238 | #endif |
1239 | /* |
1240 | * prep the low-level portal struct with the mapped addresses from the |
1241 | * config, everything that follows depends on it and "config" is more |
1242 | * for (de)reference |
1243 | */ |
1244 | p->addr.ce = c->addr_virt_ce; |
1245 | p->addr.ce_be = c->addr_virt_ce; |
1246 | p->addr.ci = c->addr_virt_ci; |
1247 | /* |
1248 | * If CI-stashing is used, the current defaults use a threshold of 3, |
1249 | * and stash with high-than-DQRR priority. |
1250 | */ |
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
1254 | goto fail_eqcr; |
1255 | } |
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
1259 | goto fail_dqrr; |
1260 | } |
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
1263 | goto fail_mr; |
1264 | } |
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
1267 | goto fail_mc; |
1268 | } |
1269 | /* static interrupt-gating controls */ |
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
1274 | if (!portal->cgrs) |
1275 | goto fail_cgrs; |
1276 | /* initial snapshot is no-depletion */ |
	qman_cgrs_init(&portal->cgrs[1]);
1278 | if (cgrs) |
1279 | portal->cgrs[0] = *cgrs; |
1280 | else |
1281 | /* if the given mask is NULL, assume all CGRs can be seen */ |
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
1284 | raw_spin_lock_init(&portal->cgr_lock); |
1285 | INIT_WORK(&portal->congestion_work, qm_congestion_task); |
1286 | INIT_WORK(&portal->mr_work, qm_mr_process_task); |
1287 | portal->bits = 0; |
1288 | portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | |
1289 | QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | |
1290 | QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; |
1291 | isdr = 0xffffffff; |
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	qm_out(p, QM_REG_IIR, 1);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
1299 | goto fail_irq; |
1300 | } |
1301 | |
	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
1303 | goto fail_affinity; |
1304 | |
1305 | /* Need EQCR to be empty before continuing */ |
1306 | isdr &= ~QM_PIRQ_EQCI; |
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
1311 | goto fail_eqcr_empty; |
1312 | } |
1313 | isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI); |
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_dbg(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
1320 | /* special handling, drain just in case it's a few FQRNIs */ |
		const union qm_mr_entry *e = qm_mr_current(p);
1322 | |
1323 | dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n" , |
1324 | e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd)); |
1325 | goto fail_dqrr_mr_empty; |
1326 | } |
1327 | /* Success */ |
1328 | portal->config = c; |
	qm_out(p, QM_REG_ISR, 0xffffffff);
	qm_out(p, QM_REG_ISDR, 0);
	if (!qman_requires_cleanup())
		qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
1335 | return 0; |
1336 | |
1337 | fail_dqrr_mr_empty: |
1338 | fail_eqcr_empty: |
1339 | fail_affinity: |
1340 | free_irq(c->irq, portal); |
1341 | fail_irq: |
	kfree(portal->cgrs);
1343 | fail_cgrs: |
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
1351 | fail_eqcr: |
1352 | return -EIO; |
1353 | } |
1354 | |
1355 | struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c, |
1356 | const struct qman_cgrs *cgrs) |
1357 | { |
1358 | struct qman_portal *portal; |
1359 | int err; |
1360 | |
1361 | portal = &per_cpu(qman_affine_portal, c->cpu); |
1362 | err = qman_create_portal(portal, c, cgrs); |
1363 | if (err) |
1364 | return NULL; |
1365 | |
	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);
1371 | |
1372 | return portal; |
1373 | } |
1374 | |
1375 | static void qman_destroy_portal(struct qman_portal *qm) |
1376 | { |
1377 | const struct qm_portal_config *pcfg; |
1378 | |
1379 | /* Stop dequeues on the portal */ |
	qm_dqrr_sdqcr_set(&qm->p, 0);
1381 | |
1382 | /* |
1383 | * NB we do this to "quiesce" EQCR. If we add enqueue-completions or |
1384 | * something related to QM_PIRQ_EQCI, this may need fixing. |
1385 | * Also, due to the prefetching model used for CI updates in the enqueue |
1386 | * path, this update will only invalidate the CI cacheline *after* |
1387 | * working on it, so we need to call this twice to ensure a full update |
1388 | * irrespective of where the enqueue processing was at when the teardown |
1389 | * began. |
1390 | */ |
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
1393 | pcfg = qm->config; |
1394 | |
1395 | free_irq(pcfg->irq, qm); |
1396 | |
	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);
1402 | |
1403 | qm->config = NULL; |
1404 | } |
1405 | |
1406 | const struct qm_portal_config *qman_destroy_affine_portal(void) |
1407 | { |
1408 | struct qman_portal *qm = get_affine_portal(); |
1409 | const struct qm_portal_config *pcfg; |
1410 | int cpu; |
1411 | |
1412 | pcfg = qm->config; |
1413 | cpu = pcfg->cpu; |
1414 | |
1415 | qman_destroy_portal(qm); |
1416 | |
	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
1420 | put_affine_portal(); |
1421 | return pcfg; |
1422 | } |
1423 | |
1424 | /* Inline helper to reduce nesting in __poll_portal_slow() */ |
1425 | static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, |
1426 | const union qm_mr_entry *msg, u8 verb) |
1427 | { |
1428 | switch (verb) { |
1429 | case QM_MR_VERB_FQRL: |
1430 | DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); |
1431 | fq_clear(fq, QMAN_FQ_STATE_ORL); |
1432 | break; |
1433 | case QM_MR_VERB_FQRN: |
1434 | DPAA_ASSERT(fq->state == qman_fq_state_parked || |
1435 | fq->state == qman_fq_state_sched); |
1436 | DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); |
1437 | fq_clear(fq, QMAN_FQ_STATE_CHANGING); |
1438 | if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) |
1439 | fq_set(fq, QMAN_FQ_STATE_NE); |
1440 | if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) |
1441 | fq_set(fq, QMAN_FQ_STATE_ORL); |
1442 | fq->state = qman_fq_state_retired; |
1443 | break; |
1444 | case QM_MR_VERB_FQPN: |
1445 | DPAA_ASSERT(fq->state == qman_fq_state_sched); |
1446 | DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); |
1447 | fq->state = qman_fq_state_parked; |
1448 | } |
1449 | } |
1450 | |
1451 | static void qm_congestion_task(struct work_struct *work) |
1452 | { |
1453 | struct qman_portal *p = container_of(work, struct qman_portal, |
1454 | congestion_work); |
1455 | struct qman_cgrs rr, c; |
1456 | union qm_mc_result *mcr; |
1457 | struct qman_cgr *cgr; |
1458 | |
1459 | /* |
1460 | * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock! |
1461 | */ |
1462 | raw_spin_lock_irq(&p->cgr_lock); |
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
1466 | raw_spin_unlock_irq(&p->cgr_lock); |
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
1468 | qman_p_irqsource_add(p, QM_PIRQ_CSCI); |
1469 | return; |
1470 | } |
1471 | /* mask out the ones I'm not interested in */ |
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
1482 | raw_spin_unlock_irq(&p->cgr_lock); |
1483 | qman_p_irqsource_add(p, QM_PIRQ_CSCI); |
1484 | } |
1485 | |
1486 | static void qm_mr_process_task(struct work_struct *work) |
1487 | { |
1488 | struct qman_portal *p = container_of(work, struct qman_portal, |
1489 | mr_work); |
1490 | const union qm_mr_entry *msg; |
1491 | struct qman_fq *fq; |
1492 | u8 verb, num = 0; |
1493 | |
1494 | preempt_disable(); |
1495 | |
1496 | while (1) { |
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
1499 | if (!msg) |
1500 | break; |
1501 | |
1502 | verb = msg->verb & QM_MR_VERB_TYPE_MASK; |
1503 | /* The message is a software ERN iff the 0x20 bit is clear */ |
1504 | if (verb & 0x20) { |
1505 | switch (verb) { |
1506 | case QM_MR_VERB_FQRNI: |
1507 | /* nada, we drop FQRNIs on the floor */ |
1508 | break; |
1509 | case QM_MR_VERB_FQRN: |
1510 | case QM_MR_VERB_FQRL: |
1511 | /* Lookup in the retirement table */ |
1512 | fq = fqid_to_fq(qm_fqid_get(&msg->fq)); |
1513 | if (WARN_ON(!fq)) |
1514 | break; |
1515 | fq_state_change(p, fq, msg, verb); |
1516 | if (fq->cb.fqs) |
1517 | fq->cb.fqs(p, fq, msg); |
1518 | break; |
1519 | case QM_MR_VERB_FQPN: |
1520 | /* Parked */ |
1521 | fq = tag_to_fq(be32_to_cpu(msg->fq.context_b)); |
1522 | fq_state_change(p, fq, msg, verb); |
1523 | if (fq->cb.fqs) |
1524 | fq->cb.fqs(p, fq, msg); |
1525 | break; |
1526 | case QM_MR_VERB_DC_ERN: |
1527 | /* DCP ERN */ |
1528 | pr_crit_once("Leaking DCP ERNs!\n" ); |
1529 | break; |
1530 | default: |
1531 | pr_crit("Invalid MR verb 0x%02x\n" , verb); |
1532 | } |
1533 | } else { |
			/* It's a software ERN */
1535 | fq = tag_to_fq(be32_to_cpu(msg->ern.tag)); |
1536 | fq->cb.ern(p, fq, msg); |
1537 | } |
1538 | num++; |
		qm_mr_next(&p->p);
1540 | } |
1541 | |
	qm_mr_cci_consume(&p->p, num);
1543 | qman_p_irqsource_add(p, QM_PIRQ_MRI); |
1544 | preempt_enable(); |
1545 | } |
1546 | |
1547 | static u32 __poll_portal_slow(struct qman_portal *p, u32 is) |
1548 | { |
1549 | if (is & QM_PIRQ_CSCI) { |
1550 | qman_p_irqsource_remove(p, QM_PIRQ_CSCI); |
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
1553 | } |
1554 | |
1555 | if (is & QM_PIRQ_EQRI) { |
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
1558 | wake_up(&affine_queue); |
1559 | } |
1560 | |
1561 | if (is & QM_PIRQ_MRI) { |
1562 | qman_p_irqsource_remove(p, QM_PIRQ_MRI); |
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
1565 | } |
1566 | |
1567 | return is; |
1568 | } |
1569 | |
1570 | /* |
1571 | * remove some slowish-path stuff from the "fast path" and make sure it isn't |
1572 | * inlined. |
1573 | */ |
1574 | static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) |
1575 | { |
1576 | p->vdqcr_owned = NULL; |
1577 | fq_clear(fq, QMAN_FQ_STATE_VDQCR); |
1578 | wake_up(&affine_queue); |
1579 | } |
1580 | |
1581 | /* |
1582 | * The only states that would conflict with other things if they ran at the |
1583 | * same time on the same cpu are: |
1584 | * |
1585 | * (i) setting/clearing vdqcr_owned, and |
1586 | * (ii) clearing the NE (Not Empty) flag. |
1587 | * |
 * Both are safe, because:
1589 | * |
1590 | * (i) this clearing can only occur after qman_volatile_dequeue() has set the |
1591 | * vdqcr_owned field (which it does before setting VDQCR), and |
1592 | * qman_volatile_dequeue() blocks interrupts and preemption while this is |
1593 | * done so that we can't interfere. |
1594 | * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as |
1595 | * with (i) that API prevents us from interfering until it's safe. |
1596 | * |
1597 | * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far |
 * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net
1599 | * advantage comes from this function not having to "lock" anything at all. |
1600 | * |
1601 | * Note also that the callbacks are invoked at points which are safe against the |
1602 | * above potential conflicts, but that this function itself is not re-entrant |
1603 | * (this is because the function tracks one end of each FIFO in the portal and |
1604 | * we do *not* want to lock that). So the consequence is that it is safe for |
1605 | * user callbacks to call into any QMan API. |
1606 | */ |
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit,
					      bool sched_napi)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq, sched_napi);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
			fq = tag_to_fq(be32_to_cpu(dq->context_b));
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq, sched_napi);
			/*
			 * The callback can request that we exit without
			 * consuming this entry or advancing.
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}

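/*
 * Enable the given (visible) interrupt sources on a portal. The IER update is
 * done with local interrupts disabled so it cannot race the portal's own ISR
 * on this CPU.
 */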
void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	p->irq_sources |= bits & QM_PIRQ_VISIBLE;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_add);
1695 | |
1696 | void qman_p_irqsource_remove(struct qman_portal *p, u32 bits) |
1697 | { |
1698 | unsigned long irqflags; |
1699 | u32 ier; |
1700 | |
1701 | /* |
1702 | * Our interrupt handler only processes+clears status register bits that |
1703 | * are in p->irq_sources. As we're trimming that mask, if one of them |
1704 | * were to assert in the status register just before we remove it from |
1705 | * the enable register, there would be an interrupt-storm when we |
1706 | * release the IRQ lock. So we wait for the enable register update to |
1707 | * take effect in h/w (by reading it back) and then clear all other bits |
1708 | * in the status register. Ie. we clear them from ISR once it's certain |
1709 | * IER won't allow them to reassert. |
1710 | */ |
1711 | local_irq_save(irqflags); |
1712 | bits &= QM_PIRQ_VISIBLE; |
1713 | p->irq_sources &= ~bits; |
1714 | qm_out(p: &p->p, QM_REG_IER, val: p->irq_sources); |
1715 | ier = qm_in(p: &p->p, QM_REG_IER); |
1716 | /* |
1717 | * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a |
1718 | * data-dependency, ie. to protect against re-ordering. |
1719 | */ |
1720 | qm_out(p: &p->p, QM_REG_ISR, val: ~ier); |
1721 | local_irq_restore(irqflags); |
1722 | } |
1723 | EXPORT_SYMBOL(qman_p_irqsource_remove); |
1724 | |
1725 | const cpumask_t *qman_affine_cpus(void) |
1726 | { |
1727 | return &affine_mask; |
1728 | } |
1729 | EXPORT_SYMBOL(qman_affine_cpus); |
1730 | |
1731 | u16 qman_affine_channel(int cpu) |
1732 | { |
1733 | if (cpu < 0) { |
1734 | struct qman_portal *portal = get_affine_portal(); |
1735 | |
1736 | cpu = portal->config->cpu; |
1737 | put_affine_portal(); |
1738 | } |
1739 | WARN_ON(!cpumask_test_cpu(cpu, &affine_mask)); |
1740 | return affine_channels[cpu]; |
1741 | } |
1742 | EXPORT_SYMBOL(qman_affine_channel); |
1743 | |
1744 | struct qman_portal *qman_get_affine_portal(int cpu) |
1745 | { |
1746 | return affine_portals[cpu]; |
1747 | } |
1748 | EXPORT_SYMBOL(qman_get_affine_portal); |
1749 | |
1750 | int qman_start_using_portal(struct qman_portal *p, struct device *dev) |
1751 | { |
1752 | return (!device_link_add(consumer: dev, supplier: p->config->dev, |
1753 | DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0; |
1754 | } |
1755 | EXPORT_SYMBOL(qman_start_using_portal); |
1756 | |
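/*
 * Poll-mode alternative to the DQRR interrupt path: process up to @limit
 * DQRR entries on portal @p and return the number handled. A minimal sketch
 * of a hypothetical poll-mode caller (budget handling elided):
 *
 *	struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
 *	int done = qman_p_poll_dqrr(p, QMAN_POLL_LIMIT);
 */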
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit, false);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);

void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);

/* Frame queue API */

static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "<unknown MCR result>";
}

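/*
 * Bind a caller-allocated qman_fq to an FQID (allocating one first if
 * QMAN_FQ_FLAG_DYNAMIC_FQID is set) and register it in the driver's demux
 * table; the FQ starts out in the out-of-service state. A minimal lifecycle
 * sketch using the APIs below (callbacks and error handling elided; note
 * that retirement may complete asynchronously, see qman_retire_fq()):
 *
 *	qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
 *	qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, NULL);
 *	...
 *	qman_retire_fq(&fq, NULL);
 *	qman_oos_fq(&fq);
 *	qman_destroy_fq(&fq);
 */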
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	fq->fqid = fqid;
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;

	/* A context_b of 0 is allegedly special, so don't use that fqid */
	if (fqid == 0 || fqid >= num_fqids) {
		WARN(1, "bad fqid %d\n", fqid);
		return -EINVAL;
	}

	fq->idx = fqid * 2;
	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
		fq->idx++;

	WARN_ON(fq_table[fq->idx]);
	fq_table[fq->idx] = fq;

	return 0;
}
EXPORT_SYMBOL(qman_create_fq);

void qman_destroy_fq(struct qman_fq *fq)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);

		DPAA_ASSERT(fq_table[fq->idx]);
		fq_table[fq->idx] = NULL;
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
}
EXPORT_SYMBOL(qman_destroy_fq);

u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);

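/*
 * Issue an INITFQ_[PARKED|SCHED] management command for @fq, which must be
 * out-of-service or parked and not marked NO_MODIFY. @opts, if given, is
 * merged with driver-imposed settings (the context_b demux tag and, where
 * applicable, the stashing address) before the command is committed.
 */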
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* OAC can't be set at the same time as TDTHRESH */
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as
	 * a demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	if (opts) {
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);

int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue an ALTERFQ_SCHED management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);

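/*
 * Retire @fq. Returns 0 if retirement completed immediately, 1 if it is
 * pending (QMAN_FQ_STATE_CHANGING is set and an FQRN message will follow),
 * or a negative errno. On immediate completion the NE/ORL state is captured
 * in *@flags if @flags is non-NULL.
 */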
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);

int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);

int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);

static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}

int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
{
	struct qm_mcr_querycgr query_cgr;
	int err;

	err = qman_query_cgr(cgr, &query_cgr);
	if (err)
		return err;

	*result = !!query_cgr.cgr.cs;
	return 0;
}
EXPORT_SYMBOL(qman_query_cgr_congested);

/* internal function used as a wait_event() expression */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}

static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
{
	int ret;

	*p = get_affine_portal();
	ret = set_p_vdqcr(*p, fq, vdqcr);
	put_affine_portal();
	return ret;
}

static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}

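/*
 * Issue a volatile dequeue command for @fq, which must be parked or retired;
 * @vdqcr carries the command options (its FQID field must be left clear, it
 * is filled in here). A minimal sketch draining up to three frames and
 * blocking until the command completes (error handling elided):
 *
 *	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
 *				    QMAN_VOLATILE_FLAG_FINISH,
 *				    QM_VDQCR_NUMFRAMES_SET(3));
 */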
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;
	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
						 !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				   !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);

static void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

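/*
 * Enqueue @fd on @fq via the affine portal's EQCR. Returns 0 on success or
 * -EBUSY if no EQCR entry is currently available. A minimal retry sketch
 * (MY_ENQUEUE_RETRIES is a hypothetical, caller-chosen bound):
 *
 *	for (i = 0; i < MY_ENQUEUE_RETRIES; i++)
 *		if (qman_enqueue(fq, fd) != -EBUSY)
 *			break;
 */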
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;
	int ret = 0;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq)) {
		/* EQCR is full - report it rather than silently dropping */
		ret = -EBUSY;
		goto out;
	}

	qm_fqid_set(eq, fq->fqid);
	eq->tag = cpu_to_be32(fq_to_tag(fq));
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_enqueue);

static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}

#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)

/* congestion state change notification target update control */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
	else
		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}

static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
	else
		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}

static u8 qman_cgr_cpus[CGR_NUM];

void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int err_cnt = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			err_cnt++;
	}

	if (err_cnt)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       err_cnt, (err_cnt > 1) ? "s" : "");
}

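/*
 * Register @cgr on the current CPU's affine portal: optionally
 * initialise/modify the CGR in h/w (adding this portal as a CSCN target) and
 * hook the object into the portal's callback list. The creating CPU is
 * recorded in qman_cgr_cpus[] so later updates/deletion can be routed back
 * to it.
 */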
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	preempt_disable();
	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	raw_spin_lock_irq(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
				     be32_to_cpu(cgr_state.cgr.cscn_targ));
		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	raw_spin_unlock_irq(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);

static struct qman_portal *qman_cgr_get_affine_portal(struct qman_cgr *cgr)
{
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		put_affine_portal();
		return NULL;
	}

	return p;
}

int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = qman_cgr_get_affine_portal(cgr);

	if (!p)
		return -EINVAL;

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);

struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};

static void qman_delete_cgr_smp_call(void *p)
{
	qman_delete_cgr((struct qman_cgr *)p);
}

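/*
 * CGRs must be torn down on the CPU whose affine portal they were created on
 * (see qman_cgr_get_affine_portal() above); if called from any other CPU,
 * bounce the deletion to the creating CPU via a synchronous cross-CPU call.
 */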
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);

static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
{
	int ret;
	unsigned long irqflags;
	struct qman_portal *p = qman_cgr_get_affine_portal(cgr);

	if (!p)
		return -EINVAL;

	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
	ret = qm_modify_cgr(cgr, 0, opts);
	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	put_affine_portal();
	return ret;
}

struct update_cgr_params {
	struct qman_cgr *cgr;
	struct qm_mcc_initcgr *opts;
	int ret;
};

static void qman_update_cgr_smp_call(void *p)
{
	struct update_cgr_params *params = p;

	params->ret = qman_update_cgr(params->cgr, params->opts);
}

int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
{
	struct update_cgr_params params = {
		.cgr = cgr,
		.opts = opts,
	};

	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id())
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_update_cgr_smp_call, &params,
					 true);
	else
		params.ret = qman_update_cgr(cgr, opts);
	preempt_enable();
	return params.ret;
}
EXPORT_SYMBOL(qman_update_cgr_safe);

/* Cleanup FQs */

static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}

static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}

#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)

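/*
 * Force an FQID out of service: query its state, retire it if necessary
 * (draining frames via SDQCR/VDQCR and waiting out the ORL when retirement
 * is pending), then issue ALTER_OOS. Used when reclaiming FQIDs whose owner
 * didn't tear them down cleanly.
 */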
int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p, *channel_portal;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	qm_fqd_get_wq(&mcr->queryfq.fqd);

	if (channel < qm_channel_pool1) {
		channel_portal = get_portal_for_channel(channel);
		if (channel_portal == NULL) {
			dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
				channel);
			ret = -EIO;
			goto out;
		}
	} else
		channel_portal = p;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&channel_portal->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_OK)
			drain_mr_fqrni(&channel_portal->p);

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			int found_fqrn = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_drain_nomatch(&channel_portal->p);
				/* Process message ring too */
				found_fqrn = qm_mr_drain(&channel_portal->p,
							 FQRN);
				cpu_relax();
			} while (!found_fqrn);
			/* Restore SDQCR */
			qm_dqrr_sdqcr_set(&channel_portal->p,
					  channel_portal->sdqcr);

		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely
				 */
			} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}

const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);

struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */

static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	if (!p)
		return -ENODEV;

	addr = gen_pool_alloc(p, cnt);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);

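/*
 * Return an FQID to the allocator. The FQ is forced out of service first;
 * if that fails, the ID is deliberately leaked rather than recycled while
 * h/w may still hold state for it.
 */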
int qman_release_fqid(u32 fqid)
{
	int ret = qman_shutdown_fq(fqid);

	if (ret) {
		pr_debug("FQID %d leaked\n", fqid);
		return ret;
	}

	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_fqid);

static int qpool_cleanup(u32 qp)
{
	/*
	 * We query all FQDs starting from FQID 1 until we get an "invalid
	 * FQID" error, looking for non-OOS FQDs whose destination channel
	 * is the pool-channel being released. When a non-OOS FQD is found,
	 * we attempt to clean it up.
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
				if (err)
					/*
					 * Couldn't shut down the FQ
					 * so the pool must be leaked
					 */
					return err;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_pool(u32 qp)
{
	int ret;

	ret = qpool_cleanup(qp);
	if (ret) {
		pr_debug("CHID %d leaked\n", qp);
		return ret;
	}

	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_pool);

static int cgr_cleanup(u32 cgrid)
{
	/*
	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);
				return -EIO;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_cgrid(u32 cgrid)
{
	int ret;

	ret = cgr_cleanup(cgrid);
	if (ret) {
		pr_debug("CGRID %d leaked\n", cgrid);
		return ret;
	}

	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_cgrid);