/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
EXPORT_SYMBOL(qm_channel_caam);

/* Register offsets */
#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
#define REG_DD_CFG		0x0200
#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
#define REG_PFDR_FPC		0x0400
#define REG_PFDR_FP_HEAD	0x0404
#define REG_PFDR_FP_TAIL	0x0408
#define REG_PFDR_FP_LWIT	0x0410
#define REG_PFDR_CFG		0x0414
#define REG_SFDR_CFG		0x0500
#define REG_SFDR_IN_USE		0x0504
#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
#define REG_WQ_DEF_ENC_WQID	0x0630
#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
#define REG_CM_CFG		0x0800
#define REG_ECSR		0x0a00
#define REG_ECIR		0x0a04
#define REG_EADR		0x0a08
#define REG_ECIR2		0x0a0c
#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
#define REG_MCR			0x0b00
#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
#define REG_MISC_CFG		0x0be0
#define REG_HID_CFG		0x0bf0
#define REG_IDLE_STAT		0x0bf4
#define REG_IP_REV_1		0x0bf8
#define REG_IP_REV_2		0x0bfc
#define REG_FQD_BARE		0x0c00
#define REG_PFDR_BARE		0x0c20
#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
#define REG_QCSP_BARE		0x0c80
#define REG_QCSP_BAR		0x0c84
#define REG_CI_SCHED_CFG	0x0d00
#define REG_SRCIDR		0x0d04
#define REG_LIODNR		0x0d08
#define REG_CI_RLM_AVG		0x0d14
#define REG_ERR_ISR		0x0e00
#define REG_ERR_IER		0x0e04
#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))

/* Assists for QMAN_MCR */
#define MCR_INIT_PFDR		0x01000000
#define MCR_get_rslt(v)		(u8)((v) >> 24)
#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
#define MCR_rslt_ok(r)		((r) == 0xf0)
#define MCR_rslt_eaccess(r)	((r) == 0xf8)
#define MCR_rslt_inval(r)	((r) == 0xff)
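
/*
 * Note on the MCR handshake (inferred from the helpers above, not from the
 * reference manual): the top byte of QMAN_MCR carries the last command's
 * result. 0x00 means no command has run yet and values >= 0xf0 are
 * completion codes, so either counts as "idle"; e.g. a successful
 * MCR_INIT_PFDR leaves 0xf0 in bits 24-31 and MCR_rslt_ok() is true.
 */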

/*
 * Corenet initiator settings. Stash request queues are 4-deep to match the
 * cores' ability to snarf. Stash priority is 3, other priorities are 2.
 */
#define QM_CI_SCHED_CFG_SRCCIV		4
#define QM_CI_SCHED_CFG_SRQ_W		3
#define QM_CI_SCHED_CFG_RW_W		2
#define QM_CI_SCHED_CFG_BMAN_W		2
/* write SRCCIV enable */
#define QM_CI_SCHED_CFG_SRCCIV_EN	BIT(31)

/* Follows WQ_CS_CFG0-5 */
enum qm_wq_class {
	qm_wq_portal = 0,
	qm_wq_pool = 1,
	qm_wq_fman0 = 2,
	qm_wq_fman1 = 3,
	qm_wq_caam = 4,
	qm_wq_pme = 5,
	qm_wq_first = qm_wq_portal,
	qm_wq_last = qm_wq_pme
};

/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
enum qm_memory {
	qm_memory_fqd,
	qm_memory_pfdr
};

/* Used by all error interrupt registers except 'inhibit' */
#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
#define QM_EIRQ_ICVI	0x00010000	/* Invalid Command Verb */
#define QM_EIRQ_IDDI	0x00000800	/* Invalid Dequeue (Direct-connect) */
#define QM_EIRQ_IDFI	0x00000400	/* Invalid Dequeue FQ */
#define QM_EIRQ_IDSI	0x00000200	/* Invalid Dequeue Source */
#define QM_EIRQ_IDQI	0x00000100	/* Invalid Dequeue Queue */
#define QM_EIRQ_IECE	0x00000010	/* Invalid Enqueue Configuration */
#define QM_EIRQ_IEOI	0x00000008	/* Invalid Enqueue Overflow */
#define QM_EIRQ_IESI	0x00000004	/* Invalid Enqueue State */
#define QM_EIRQ_IECI	0x00000002	/* Invalid Enqueue Channel */
#define QM_EIRQ_IEQI	0x00000001	/* Invalid Enqueue Queue */

/* QMAN_ECIR valid error bit */
#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
			 QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
			 QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
			 QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
			 QM_EIRQ_IFSI)

struct qm_ecir {
	u32 info;	/* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
};

static bool qm_ecir_is_dcp(const struct qm_ecir *p)
{
	return p->info & BIT(29);
}

static int qm_ecir_get_pnum(const struct qm_ecir *p)
{
	return (p->info >> 24) & 0x1f;
}

static int qm_ecir_get_fqid(const struct qm_ecir *p)
{
	return p->info & (BIT(24) - 1);
}
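
/*
 * Example decode (hypothetical register value, for illustration only): with
 * info = 0x23000042, BIT(29) is set so the error originated at a DCP portal,
 * the portal number is (0x23 & 0x1f) = 3, and the FQID is 0x42.
 */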

struct qm_ecir2 {
	u32 info;	/* ptyp[31], res[10-30], pnum[0-9] */
};

static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
{
	return p->info & BIT(31);
}

static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
{
	return p->info & (BIT(10) - 1);
}

struct qm_eadr {
	u32 info;	/* memid[24-27], eadr[0-11] */
			/* v3: memid[24-28], eadr[0-15] */
};

static int qm_eadr_get_memid(const struct qm_eadr *p)
{
	return (p->info >> 24) & 0xf;
}

static int qm_eadr_get_eadr(const struct qm_eadr *p)
{
	return p->info & (BIT(12) - 1);
}

static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
{
	return (p->info >> 24) & 0x1f;
}

static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
{
	return p->info & (BIT(16) - 1);
}

struct qman_hwerr_txt {
	u32 mask;
	const char *txt;
};

static const struct qman_hwerr_txt qman_hwerr_txts[] = {
	{ QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
	{ QM_EIRQ_CTDE, "Corenet Target Data Error" },
	{ QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
	{ QM_EIRQ_PLWI, "PFDR Low Watermark" },
	{ QM_EIRQ_MBEI, "Multi-bit ECC Error" },
	{ QM_EIRQ_SBEI, "Single-bit ECC Error" },
	{ QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
	{ QM_EIRQ_ICVI, "Invalid Command Verb" },
	{ QM_EIRQ_IFSI, "Invalid Flow Control State" },
	{ QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
	{ QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
	{ QM_EIRQ_IDSI, "Invalid Dequeue Source" },
	{ QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
	{ QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
	{ QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
	{ QM_EIRQ_IESI, "Invalid Enqueue State" },
	{ QM_EIRQ_IECI, "Invalid Enqueue Channel" },
	{ QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
};

struct qman_error_info_mdata {
	u16 addr_mask;
	u16 bits;
	const char *txt;
};

static const struct qman_error_info_mdata error_mdata[] = {
	{ 0x01FF, 24, "FQD cache tag memory 0" },
	{ 0x01FF, 24, "FQD cache tag memory 1" },
	{ 0x01FF, 24, "FQD cache tag memory 2" },
	{ 0x01FF, 24, "FQD cache tag memory 3" },
	{ 0x0FFF, 512, "FQD cache memory" },
	{ 0x07FF, 128, "SFDR memory" },
	{ 0x01FF, 72, "WQ context memory" },
	{ 0x00FF, 240, "CGR memory" },
	{ 0x00FF, 302, "Internal Order Restoration List memory" },
	{ 0x01FF, 256, "SW portal ring memory" },
};

#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)

/*
 * TODO: unimplemented registers
 *
 * Keeping a list here of QMan registers not yet covered:
 * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
 * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
 * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
 */

/* Pointer to the start of the QMan's CCSR space */
static u32 __iomem *qm_ccsr_start;
/* An SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
static int __qman_probed;
static int __qman_requires_cleanup;

static inline u32 qm_ccsr_in(u32 offset)
{
	return ioread32be(qm_ccsr_start + offset/4);
}

static inline void qm_ccsr_out(u32 offset, u32 val)
{
	iowrite32be(val, qm_ccsr_start + offset/4);
}
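
/*
 * qm_ccsr_start is a u32 pointer, so the byte offsets defined above are
 * divided by 4 before being used as an index; CCSR registers are big-endian,
 * hence the ioread32be()/iowrite32be() accessors above.
 */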

u32 qm_get_pools_sdqcr(void)
{
	return qm_pools_sdqcr;
}

enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1
};

static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
{
	DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
		    portal == qm_dc_portal_fman1);
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		qm_ccsr_out(REG_DCP_CFG(portal),
			    (ed ? 0x1000 : 0) | (sernd & 0x3ff));
	else
		qm_ccsr_out(REG_DCP_CFG(portal),
			    (ed ? 0x100 : 0) | (sernd & 0x1f));
}
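
/*
 * qman_init_ccsr() below calls this with ed = 1, sernd = 0 for both FMan
 * portals; per the comment there, the driver is not prepared to accept ERNs
 * for hardware enqueues, so 'ed' plausibly acts as an enqueue-rejection
 * discard enable (an inference from this driver, not confirmed by the
 * reference manual).
 */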

static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
				 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
				 u8 csw5, u8 csw6, u8 csw7)
{
	qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
		    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
		    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
		    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
}

static void qm_set_hid(void)
{
	qm_ccsr_out(REG_HID_CFG, 0);
}

static void qm_set_corenet_initiator(void)
{
	qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
		    (QM_CI_SCHED_CFG_SRCCIV << 24) |
		    (QM_CI_SCHED_CFG_SRQ_W << 8) |
		    (QM_CI_SCHED_CFG_RW_W << 4) |
		    QM_CI_SCHED_CFG_BMAN_W);
}

static void qm_get_version(u16 *id, u8 *major, u8 *minor)
{
	u32 v = qm_ccsr_in(REG_IP_REV_1);
	*id = (v >> 16);
	*major = (v >> 8) & 0xff;
	*minor = v & 0xff;
}
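
/*
 * Example (hypothetical register value, for illustration): if REG_IP_REV_1
 * reads 0x0a010300, then id = 0x0a01, major = 3 and minor = 0, which
 * fsl_qman_probe() below maps to QMAN_REV30.
 */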

#define PFDR_AR_EN BIT(31)
static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
{
	void *ptr;
	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
	u32 exp = ilog2(size);
	u32 bar, bare;

	/* choke if size isn't within range */
	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
		    is_power_of_2(size));
	/* choke if 'ba' has lower-alignment than 'size' */
	DPAA_ASSERT(!(ba & (size - 1)));

	/* Check to see if QMan has already been initialized */
	bar = qm_ccsr_in(offset + REG_offset_BAR);
	if (bar) {
		/* Make sure ba == what was programmed */
		bare = qm_ccsr_in(offset);
		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
			pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
			       ba, bare, bar);
			return -ENOMEM;
		}
		__qman_requires_cleanup = 1;
		/* Return 1 to indicate memory was previously programmed */
		return 1;
	}
	/* Need to temporarily map the area to make sure it is zeroed */
	ptr = memremap(ba, size, MEMREMAP_WB);
	if (!ptr) {
		pr_crit("memremap() of QMan private memory failed\n");
		return -ENOMEM;
	}
	memset(ptr, 0, size);

#ifdef CONFIG_PPC
	/*
	 * PPC doesn't appear to flush the cache on memunmap() but the
	 * cache must be flushed since QMan does non-coherent accesses
	 * to this memory
	 */
	flush_dcache_range((unsigned long) ptr, (unsigned long) ptr+size);
#endif
	memunmap(ptr);

	qm_ccsr_out(offset, upper_32_bits(ba));
	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
	return 0;
}
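
/*
 * Worked example of the AR encoding (an inference from the code above): a
 * 1 MiB region gives exp = ilog2(0x100000) = 20, so the AR register is
 * written with PFDR_AR_EN | 19, i.e. the size field appears to encode
 * 2^(field + 1) bytes.
 */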

static void qm_set_pfdr_threshold(u32 th, u8 k)
{
	qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
	qm_ccsr_out(REG_PFDR_CFG, k);
}

static void qm_set_sfdr_threshold(u16 th)
{
	qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
}

static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
{
	u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));

	DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
	/* Make sure the command interface is 'idle' */
	if (!MCR_rslt_idle(rslt)) {
		dev_crit(dev, "QMAN_MCR isn't idle");
		WARN_ON(1);
	}

	/* Write the MCR command params then the verb */
	qm_ccsr_out(REG_MCP(0), pfdr_start);
	/*
	 * TODO: remove this - it's a workaround for a model bug that is
	 * corrected in more recent versions. We use the workaround until
	 * everyone has upgraded.
	 */
	qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
	dma_wmb();
	qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
	/* Poll for the result */
	do {
		rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
	} while (!MCR_rslt_idle(rslt));
	if (MCR_rslt_ok(rslt))
		return 0;
	if (MCR_rslt_eaccess(rslt))
		return -EACCES;
	if (MCR_rslt_inval(rslt))
		return -EINVAL;
	dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
	return -ENODEV;
}
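
/*
 * qman_init_ccsr() below seeds the PFDR free pool through this command with
 * pfdr_start = 8 and num = pfdr_sz / 64 - 8: each PFDR occupies 64 bytes
 * (hence the pfdr_sz / 64 count), so the pool receives every record except
 * the first eight.
 */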

/*
 * QMan needs two global memory areas initialized at boot time:
 *   1) FQD: Frame Queue Descriptors used to manage frame queues
 *   2) PFDR: Packed Frame Descriptor Records used to store frames
 * Both areas are reserved using the device tree reserved memory framework
 * and the addresses and sizes are initialized when the QMan device is probed
 */
static dma_addr_t fqd_a, pfdr_a;
static size_t fqd_sz, pfdr_sz;

#ifdef CONFIG_PPC
/*
 * Support for PPC Device Tree backward compatibility when compatible
 * string is set to fsl-qman-fqd and fsl-qman-pfdr
 */
static int zero_priv_mem(phys_addr_t addr, size_t sz)
{
	/* map as cacheable, non-guarded */
	void __iomem *tmpp = ioremap_cache(addr, sz);

	if (!tmpp)
		return -ENOMEM;

	memset_io(tmpp, 0, sz);
	flush_dcache_range((unsigned long)tmpp,
			   (unsigned long)tmpp + sz);
	iounmap(tmpp);

	return 0;
}
#endif

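/*
 * Each frame queue descriptor occupies 64 bytes of the FQD area, so the
 * number of usable FQIDs is fqd_sz / 64 (e.g. a 1 MiB reservation yields
 * 16384 frame queues).
 */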
unsigned int qm_get_fqid_maxcnt(void)
{
	return fqd_sz / 64;
}

static void log_edata_bits(struct device *dev, u32 bit_count)
{
	u32 i, j, mask = 0xffffffff;

	dev_warn(dev, "ErrInt, EDATA:\n");
	i = bit_count / 32;
	if (bit_count % 32) {
		i++;
		mask = ~(mask << bit_count % 32);
	}
	j = 16 - i;
	dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
	j++;
	for (; j < 16; j++)
		dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
}
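
/*
 * Worked example (illustrative): for the 72-bit "WQ context memory" entry
 * above, bit_count = 72 gives i = 3 words (72 = 2 * 32 + 8) with mask = 0xff
 * for the most significant word, so EDATA(13..15) are dumped and only the
 * low 8 bits of EDATA(13) are shown.
 */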

static void log_additional_error_info(struct device *dev, u32 isr_val,
				      u32 ecsr_val)
{
	struct qm_ecir ecir_val;
	struct qm_eadr eadr_val;
	int memid;

	ecir_val.info = qm_ccsr_in(REG_ECIR);
	/* Is portal info valid */
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		struct qm_ecir2 ecir2_val;

		ecir2_val.info = qm_ccsr_in(REG_ECIR2);
		if (ecsr_val & PORTAL_ECSR_ERR) {
			dev_warn(dev, "ErrInt: %s id %d\n",
				 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
				 qm_ecir2_get_pnum(&ecir2_val));
		}
		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
				 qm_ecir_get_fqid(&ecir_val));

		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
			eadr_val.info = qm_ccsr_in(REG_EADR);
			memid = qm_eadr_v3_get_memid(&eadr_val);
			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
				 error_mdata[memid].txt,
				 error_mdata[memid].addr_mask
				 & qm_eadr_v3_get_eadr(&eadr_val));
			log_edata_bits(dev, error_mdata[memid].bits);
		}
	} else {
		if (ecsr_val & PORTAL_ECSR_ERR) {
			dev_warn(dev, "ErrInt: %s id %d\n",
				 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
				 qm_ecir_get_pnum(&ecir_val));
		}
		if (ecsr_val & FQID_ECSR_ERR)
			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
				 qm_ecir_get_fqid(&ecir_val));

		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
			eadr_val.info = qm_ccsr_in(REG_EADR);
			memid = qm_eadr_get_memid(&eadr_val);
			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
				 error_mdata[memid].txt,
				 error_mdata[memid].addr_mask
				 & qm_eadr_get_eadr(&eadr_val));
			log_edata_bits(dev, error_mdata[memid].bits);
		}
	}
}

static irqreturn_t qman_isr(int irq, void *ptr)
{
	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
	struct device *dev = ptr;

	ier_val = qm_ccsr_in(REG_ERR_IER);
	isr_val = qm_ccsr_in(REG_ERR_ISR);
	ecsr_val = qm_ccsr_in(REG_ECSR);
	isr_mask = isr_val & ier_val;

	if (!isr_mask)
		return IRQ_NONE;

	for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
		if (qman_hwerr_txts[i].mask & isr_mask) {
			dev_err_ratelimited(dev, "ErrInt: %s\n",
					    qman_hwerr_txts[i].txt);
			if (qman_hwerr_txts[i].mask & ecsr_val) {
				log_additional_error_info(dev, isr_mask,
							  ecsr_val);
				/* Re-arm error capture registers */
				qm_ccsr_out(REG_ECSR, ecsr_val);
			}
			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
				dev_dbg(dev, "Disabling error 0x%x\n",
					qman_hwerr_txts[i].mask);
				ier_val &= ~qman_hwerr_txts[i].mask;
				qm_ccsr_out(REG_ERR_IER, ier_val);
			}
		}
	}
	qm_ccsr_out(REG_ERR_ISR, isr_val);

	return IRQ_HANDLED;
}

static int qman_init_ccsr(struct device *dev)
{
	int i, err;

	/* FQD memory */
	err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
	if (err < 0)
		return err;
	/* PFDR memory */
	err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
	if (err < 0)
		return err;
	/* Only initialize PFDRs if the QMan was not initialized before */
	if (err == 0) {
		err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
		if (err)
			return err;
	}
	/* thresholds */
	qm_set_pfdr_threshold(512, 64);
	qm_set_sfdr_threshold(128);
	/* clear stale PEBI bit from interrupt status register */
	qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
	/* corenet initiator settings */
	qm_set_corenet_initiator();
	/* HID settings */
	qm_set_hid();
	/* Set scheduling weights to defaults */
	for (i = qm_wq_first; i <= qm_wq_last; i++)
		qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
	/* We are not prepared to accept ERNs for hardware enqueues */
	qm_set_dc(qm_dc_portal_fman0, 1, 0);
	qm_set_dc(qm_dc_portal_fman1, 1, 0);
	return 0;
}

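/*
 * The first call latches the LIODN offset pre-programmed into the first
 * portal's QCSP_LIO_CFG; subsequent calls rewrite later portals so all
 * software portals share that same offset. (This reading is inferred from
 * the code below, not from documentation.)
 */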
#define LIO_CFG_LIODN_MASK 0x0fff0000
void __qman_liodn_fixup(u16 channel)
{
	static int done;
	static u32 liodn_offset;
	u32 before, after;
	int idx = channel - QM_CHANNEL_SWPORTAL0;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
	else
		before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
	if (!done) {
		liodn_offset = before & LIO_CFG_LIODN_MASK;
		done = 1;
		return;
	}
	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
	else
		qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
}

#define IO_CFG_SDEST_MASK 0x00ff0000
void qman_set_sdest(u16 channel, unsigned int cpu_idx)
{
	int idx = channel - QM_CHANNEL_SWPORTAL0;
	u32 before, after;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
		/* Each pair of vCPUs shares the same SRQ (SDEST) */
		cpu_idx /= 2;
		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
		qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
	} else {
		before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
		qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
	}
}
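
/*
 * Example for the function above (illustrative): on a rev 3.x QMan,
 * qman_set_sdest(channel, 5) programs SDEST = 5 / 2 = 2, so vCPUs 4 and 5
 * stash to the same request queue.
 */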

static int qman_resource_init(struct device *dev)
{
	int pool_chan_num, cgrid_num;
	int ret, i;

	switch (qman_ip_rev >> 8) {
	case 1:
		pool_chan_num = 15;
		cgrid_num = 256;
		break;
	case 2:
		pool_chan_num = 3;
		cgrid_num = 64;
		break;
	case 3:
		pool_chan_num = 15;
		cgrid_num = 256;
		break;
	default:
		return -ENODEV;
	}

	ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
			   pool_chan_num, -1);
	if (ret) {
		dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
		return ret;
	}

	ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
	if (ret) {
		dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
		return ret;
	}

	/* parse pool channels into the SDQCR mask */
	for (i = 0; i < cgrid_num; i++)
		qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);

	ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
			   qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
	if (ret) {
		dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
		return ret;
	}

	return 0;
}

int qman_is_probed(void)
{
	return __qman_probed;
}
EXPORT_SYMBOL_GPL(qman_is_probed);

int qman_requires_cleanup(void)
{
	return __qman_requires_cleanup;
}

void qman_done_cleanup(void)
{
	qman_enable_irqs();
	__qman_requires_cleanup = 0;
}

static int fsl_qman_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	int ret, err_irq;
	u16 id;
	u8 major, minor;

	__qman_probed = -1;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
			node);
		return -ENXIO;
	}
	qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
	if (!qm_ccsr_start)
		return -ENXIO;

	qm_get_version(&id, &major, &minor);
	if (major == 1 && minor == 0) {
		dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
		return -ENODEV;
	} else if (major == 1 && minor == 1)
		qman_ip_rev = QMAN_REV11;
	else if (major == 1 && minor == 2)
		qman_ip_rev = QMAN_REV12;
	else if (major == 2 && minor == 0)
		qman_ip_rev = QMAN_REV20;
	else if (major == 3 && minor == 0)
		qman_ip_rev = QMAN_REV30;
	else if (major == 3 && minor == 1)
		qman_ip_rev = QMAN_REV31;
	else if (major == 3 && minor == 2)
		qman_ip_rev = QMAN_REV32;
	else {
		dev_err(dev, "Unknown QMan version\n");
		return -ENODEV;
	}

	if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
	}

	/*
	 * The order of the memory regions is assumed to be FQD followed by
	 * PFDR; to ensure allocations come from the correct region, the
	 * driver initializes and then allocates each area in that order.
	 */
	ret = qbman_init_private_mem(dev, 0, "fsl,qman-fqd", &fqd_a, &fqd_sz);
	if (ret) {
		dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
			ret);
		return -ENODEV;
	}
#ifdef CONFIG_PPC
	/*
	 * For PPC backward DT compatibility
	 * FQD memory MUST be zero'd by software
	 */
	zero_priv_mem(fqd_a, fqd_sz);
#else
	WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
#endif
	dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);

	/* Setup PFDR memory */
	ret = qbman_init_private_mem(dev, 1, "fsl,qman-pfdr", &pfdr_a, &pfdr_sz);
	if (ret) {
		dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
			ret);
		return -ENODEV;
	}
	dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);

	ret = qman_init_ccsr(dev);
	if (ret) {
		dev_err(dev, "CCSR setup failed\n");
		return ret;
	}

	err_irq = platform_get_irq(pdev, 0);
	if (err_irq <= 0) {
		dev_info(dev, "Can't get %pOF property 'interrupts'\n",
			 node);
		return -ENODEV;
	}
	ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
			       dev);
	if (ret) {
		dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
			ret, node);
		return ret;
	}

	/*
	 * Write-to-clear any stale bits, (eg. starvation being asserted prior
	 * to resource allocation during driver init).
	 */
	qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
	/* Enable Error Interrupts */
	qm_ccsr_out(REG_ERR_IER, 0xffffffff);

	qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
	if (IS_ERR(qm_fqalloc)) {
		ret = PTR_ERR(qm_fqalloc);
		dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
		return ret;
	}

	qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
	if (IS_ERR(qm_qpalloc)) {
		ret = PTR_ERR(qm_qpalloc);
		dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
		return ret;
	}

	qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
	if (IS_ERR(qm_cgralloc)) {
		ret = PTR_ERR(qm_cgralloc);
		dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
		return ret;
	}

	ret = qman_resource_init(dev);
	if (ret)
		return ret;

	ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
	if (ret)
		return ret;

	ret = qman_wq_alloc();
	if (ret)
		return ret;

	__qman_probed = 1;

	return 0;
}

static const struct of_device_id fsl_qman_ids[] = {
	{
		.compatible = "fsl,qman",
	},
	{}
};

static struct platform_driver fsl_qman_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = fsl_qman_ids,
		.suppress_bind_attrs = true,
	},
	.probe = fsl_qman_probe,
};

builtin_platform_driver(fsl_qman_driver);