1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * ARC Cache Management |
4 | * |
5 | * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) |
6 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) |
7 | */ |
8 | |
9 | #include <linux/module.h> |
10 | #include <linux/mm.h> |
11 | #include <linux/sched.h> |
12 | #include <linux/cache.h> |
13 | #include <linux/mmu_context.h> |
14 | #include <linux/syscalls.h> |
15 | #include <linux/uaccess.h> |
16 | #include <linux/pagemap.h> |
17 | #include <asm/cacheflush.h> |
18 | #include <asm/cachectl.h> |
19 | #include <asm/setup.h> |
20 | |
21 | #ifdef CONFIG_ISA_ARCV2 |
22 | #define USE_RGN_FLSH 1 |
23 | #endif |
24 | |
25 | static int l2_line_sz; |
26 | static int ioc_exists; |
27 | int slc_enable = 1, ioc_enable = 1; |
28 | unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ |
29 | unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ |
30 | |
31 | static struct cpuinfo_arc_cache { |
32 | unsigned int sz_k, line_len, colors; |
33 | } ic_info, dc_info, slc_info; |
34 | |
35 | void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr, |
36 | unsigned long sz, const int op, const int full_page); |
37 | |
38 | void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz); |
39 | void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz); |
40 | void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz); |
41 | |
42 | static int read_decode_cache_bcr_arcv2(int c, char *buf, int len) |
43 | { |
44 | struct cpuinfo_arc_cache *p_slc = &slc_info; |
45 | struct bcr_identity ident; |
46 | struct bcr_generic sbcr; |
47 | struct bcr_clust_cfg cbcr; |
48 | struct bcr_volatile vol; |
49 | int n = 0; |
50 | |
51 | READ_BCR(ARC_REG_SLC_BCR, sbcr); |
52 | if (sbcr.ver) { |
53 | struct bcr_slc_cfg slc_cfg; |
54 | READ_BCR(ARC_REG_SLC_CFG, slc_cfg); |
55 | p_slc->sz_k = 128 << slc_cfg.sz; |
56 | l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64; |
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
60 | } |
61 | |
62 | READ_BCR(ARC_REG_CLUSTER_BCR, cbcr); |
63 | if (cbcr.c) { |
64 | ioc_exists = 1; |
65 | |
66 | /* |
67 | * As for today we don't support both IOC and ZONE_HIGHMEM enabled |
68 | * simultaneously. This happens because as of today IOC aperture covers |
69 | * only ZONE_NORMAL (low mem) and any dma transactions outside this |
70 | * region won't be HW coherent. |
71 | * If we want to use both IOC and ZONE_HIGHMEM we can use |
72 | * bounce_buffer to handle dma transactions to HIGHMEM. |
73 | * Also it is possible to modify dma_direct cache ops or increase IOC |
74 | * aperture size if we are planning to use HIGHMEM without PAE. |
75 | */ |
76 | if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled()) |
77 | ioc_enable = 0; |
78 | } else { |
79 | ioc_enable = 0; |
80 | } |
81 | |
82 | READ_BCR(AUX_IDENTITY, ident); |
83 | |
84 | /* HS 2.0 didn't have AUX_VOL */ |
85 | if (ident.family > 0x51) { |
86 | READ_BCR(AUX_VOL, vol); |
87 | perip_base = vol.start << 28; |
88 | /* HS 3.0 has limit and strict-ordering fields */ |
89 | if (ident.family > 0x52) |
90 | perip_end = (vol.limit << 28) - 1; |
91 | } |
92 | |
	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
96 | |
97 | return n; |
98 | } |
99 | |
100 | int arc_cache_mumbojumbo(int c, char *buf, int len) |
101 | { |
102 | struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info; |
103 | struct bcr_cache ibcr, dbcr; |
104 | int vipt, assoc; |
105 | int n = 0; |
106 | |
107 | READ_BCR(ARC_REG_IC_BCR, ibcr); |
108 | if (!ibcr.ver) |
109 | goto dc_chk; |
110 | |
111 | if (is_isa_arcompact() && (ibcr.ver <= 3)) { |
112 | BUG_ON(ibcr.config != 3); |
113 | assoc = 2; /* Fixed to 2w set assoc */ |
114 | } else if (is_isa_arcv2() && (ibcr.ver >= 4)) { |
115 | assoc = 1 << ibcr.config; /* 1,2,4,8 */ |
116 | } |
117 | |
118 | p_ic->line_len = 8 << ibcr.line_len; |
119 | p_ic->sz_k = 1 << (ibcr.sz - 1); |
120 | p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE); |
121 | |
	n += scnprintf(buf + n, len - n,
		       "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
		       p_ic->sz_k, assoc, p_ic->line_len,
		       p_ic->colors > 1 ? " aliasing" : "",
		       IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
127 | |
128 | dc_chk: |
129 | READ_BCR(ARC_REG_DC_BCR, dbcr); |
130 | if (!dbcr.ver) |
131 | goto slc_chk; |
132 | |
133 | if (is_isa_arcompact() && (dbcr.ver <= 3)) { |
134 | BUG_ON(dbcr.config != 2); |
135 | vipt = 1; |
136 | assoc = 4; /* Fixed to 4w set assoc */ |
137 | p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE); |
138 | } else if (is_isa_arcv2() && (dbcr.ver >= 4)) { |
139 | vipt = 0; |
140 | assoc = 1 << dbcr.config; /* 1,2,4,8 */ |
141 | p_dc->colors = 1; /* PIPT so can't VIPT alias */ |
142 | } |
143 | |
144 | p_dc->line_len = 16 << dbcr.line_len; |
145 | p_dc->sz_k = 1 << (dbcr.sz - 1); |
146 | |
	n += scnprintf(buf + n, len - n,
		       "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
		       p_dc->sz_k, assoc, p_dc->line_len,
		       vipt ? "VIPT" : "PIPT",
		       IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
152 | |
153 | slc_chk: |
154 | if (is_isa_arcv2()) |
		n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
156 | |
157 | return n; |
158 | } |
159 | |
160 | /* |
161 | * Line Operation on {I,D}-Cache |
162 | */ |
163 | |
164 | #define OP_INV 0x1 |
165 | #define OP_FLUSH 0x2 |
166 | #define OP_FLUSH_N_INV 0x3 |
167 | #define OP_INV_IC 0x4 |
168 | |
169 | /* |
170 | * Cache Flush programming model |
171 | * |
172 | * ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias. |
 * Programming model requires both paddr and vaddr irrespective of aliasing
174 | * considerations: |
175 | * - vaddr in {I,D}C_IV?L |
176 | * - paddr in {I,D}C_PTAG |
177 | * |
178 | * In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias. |
179 | * Programming model is different for aliasing vs. non-aliasing I$ |
180 | * - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L |
181 | * - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$) |
182 | * |
183 | * - If PAE40 is enabled, independent of aliasing considerations, the higher |
184 | * bits needs to be written into PTAG_HI |
185 | */ |
186 | |
187 | static inline |
188 | void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr, |
189 | unsigned long sz, const int op, const int full_page) |
190 | { |
191 | unsigned int aux_cmd, aux_tag; |
192 | int num_lines; |
193 | |
194 | if (op == OP_INV_IC) { |
195 | aux_cmd = ARC_REG_IC_IVIL; |
196 | aux_tag = ARC_REG_IC_PTAG; |
197 | } else { |
198 | aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL; |
199 | aux_tag = ARC_REG_DC_PTAG; |
200 | } |
201 | |
	/* Ensure we properly floor/ceil non-line-aligned/sized requests
	 * so that @paddr is cache-line aligned and @num_lines is integral.
	 * This can be skipped for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
208 | if (!full_page) { |
209 | sz += paddr & ~CACHE_LINE_MASK; |
210 | paddr &= CACHE_LINE_MASK; |
211 | vaddr &= CACHE_LINE_MASK; |
212 | } |
213 | num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); |
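	/*
	 * Worked example of the fixup above (hypothetical numbers, 64B line):
	 * paddr = 0x8000_1034, sz = 0x50
	 *   sz    += 0x1034 & 63          -> 0x84
	 *   paddr &= ~63                  -> 0x8000_1000
	 *   num_lines = ceil(0x84 / 64)   -> 3 lines
	 */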
214 | |
215 | /* |
216 | * MMUv3, cache ops require paddr in PTAG reg |
217 | * if V-P const for loop, PTAG can be written once outside loop |
218 | */ |
219 | if (full_page) |
220 | write_aux_reg(aux_tag, paddr); |
221 | |
222 | /* |
223 | * This is technically for MMU v4, using the MMU v3 programming model |
224 | * Special work for HS38 aliasing I-cache configuration with PAE40 |
225 | * - upper 8 bits of paddr need to be written into PTAG_HI |
226 | * - (and needs to be written before the lower 32 bits) |
227 | * Note that PTAG_HI is hoisted outside the line loop |
228 | */ |
229 | if (is_pae40_enabled() && op == OP_INV_IC) |
230 | write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32); |
231 | |
232 | while (num_lines-- > 0) { |
233 | if (!full_page) { |
234 | write_aux_reg(aux_tag, paddr); |
235 | paddr += L1_CACHE_BYTES; |
236 | } |
237 | |
238 | write_aux_reg(aux_cmd, vaddr); |
239 | vaddr += L1_CACHE_BYTES; |
240 | } |
241 | } |
242 | |
243 | #ifndef USE_RGN_FLSH |
244 | |
245 | /* |
246 | */ |
247 | static inline |
248 | void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr, |
249 | unsigned long sz, const int op, const int full_page) |
250 | { |
251 | unsigned int aux_cmd; |
252 | int num_lines; |
253 | |
254 | if (op == OP_INV_IC) { |
255 | aux_cmd = ARC_REG_IC_IVIL; |
256 | } else { |
257 | /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ |
258 | aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL; |
259 | } |
260 | |
	/* Ensure we properly floor/ceil non-line-aligned/sized requests
	 * so that @paddr is cache-line aligned and @num_lines is integral.
	 * This can be skipped for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
267 | if (!full_page) { |
268 | sz += paddr & ~CACHE_LINE_MASK; |
269 | paddr &= CACHE_LINE_MASK; |
270 | } |
271 | |
272 | num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); |
273 | |
274 | /* |
275 | * For HS38 PAE40 configuration |
276 | * - upper 8 bits of paddr need to be written into PTAG_HI |
277 | * - (and needs to be written before the lower 32 bits) |
278 | */ |
279 | if (is_pae40_enabled()) { |
280 | if (op == OP_INV_IC) |
281 | /* |
282 | * Non aliasing I-cache in HS38, |
283 | * aliasing I-cache handled in __cache_line_loop_v3() |
284 | */ |
285 | write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32); |
286 | else |
287 | write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32); |
288 | } |
289 | |
290 | while (num_lines-- > 0) { |
291 | write_aux_reg(aux_cmd, paddr); |
292 | paddr += L1_CACHE_BYTES; |
293 | } |
294 | } |
295 | |
296 | #else |
297 | |
298 | /* |
299 | * optimized flush operation which takes a region as opposed to iterating per line |
300 | */ |
301 | static inline |
302 | void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr, |
303 | unsigned long sz, const int op, const int full_page) |
304 | { |
305 | unsigned int s, e; |
306 | |
307 | /* Only for Non aliasing I-cache in HS38 */ |
308 | if (op == OP_INV_IC) { |
309 | s = ARC_REG_IC_IVIR; |
310 | e = ARC_REG_IC_ENDR; |
311 | } else { |
312 | s = ARC_REG_DC_STARTR; |
313 | e = ARC_REG_DC_ENDR; |
314 | } |
315 | |
316 | if (!full_page) { |
317 | /* for any leading gap between @paddr and start of cache line */ |
318 | sz += paddr & ~CACHE_LINE_MASK; |
319 | paddr &= CACHE_LINE_MASK; |
320 | |
321 | /* |
322 | * account for any trailing gap to end of cache line |
323 | * this is equivalent to DIV_ROUND_UP() in line ops above |
324 | */ |
325 | sz += L1_CACHE_BYTES - 1; |
326 | } |
327 | |
328 | if (is_pae40_enabled()) { |
329 | /* TBD: check if crossing 4TB boundary */ |
330 | if (op == OP_INV_IC) |
331 | write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32); |
332 | else |
333 | write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32); |
334 | } |
335 | |
336 | /* ENDR needs to be set ahead of START */ |
337 | write_aux_reg(e, paddr + sz); /* ENDR is exclusive */ |
338 | write_aux_reg(s, paddr); |
339 | |
340 | /* caller waits on DC_CTRL.FS */ |
341 | } |
342 | |
343 | #endif |
344 | |
345 | #ifdef CONFIG_ARC_MMU_V3 |
346 | #define __cache_line_loop __cache_line_loop_v3 |
347 | #else |
348 | #define __cache_line_loop __cache_line_loop_v4 |
349 | #endif |
350 | |
351 | #ifdef CONFIG_ARC_HAS_DCACHE |
352 | |
353 | /*************************************************************** |
354 | * Machine specific helpers for Entire D-Cache or Per Line ops |
355 | */ |
356 | |
357 | #ifndef USE_RGN_FLSH |
358 | /* |
359 | * this version avoids extra read/write of DC_CTRL for flush or invalid ops |
360 | * in the non region flush regime (such as for ARCompact) |
361 | */ |
362 | static inline void __before_dc_op(const int op) |
363 | { |
364 | if (op == OP_FLUSH_N_INV) { |
365 | /* Dcache provides 2 cmd: FLUSH or INV |
366 | * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE |
367 | * flush-n-inv is achieved by INV cmd but with IM=1 |
368 | * So toggle INV sub-mode depending on op request and default |
369 | */ |
370 | const unsigned int ctl = ARC_REG_DC_CTRL; |
371 | write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH); |
372 | } |
373 | } |
374 | |
375 | #else |
376 | |
377 | static inline void __before_dc_op(const int op) |
378 | { |
379 | const unsigned int ctl = ARC_REG_DC_CTRL; |
380 | unsigned int val = read_aux_reg(ctl); |
381 | |
382 | if (op == OP_FLUSH_N_INV) { |
383 | val |= DC_CTRL_INV_MODE_FLUSH; |
384 | } |
385 | |
386 | if (op != OP_INV_IC) { |
387 | /* |
388 | * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1 |
389 | * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above |
390 | */ |
391 | val &= ~DC_CTRL_RGN_OP_MSK; |
392 | if (op & OP_INV) |
393 | val |= DC_CTRL_RGN_OP_INV; |
394 | } |
395 | write_aux_reg(ctl, val); |
396 | } |
397 | |
398 | #endif |
399 | |
400 | |
401 | static inline void __after_dc_op(const int op) |
402 | { |
403 | if (op & OP_FLUSH) { |
404 | const unsigned int ctl = ARC_REG_DC_CTRL; |
405 | unsigned int reg; |
406 | |
407 | /* flush / flush-n-inv both wait */ |
408 | while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS) |
409 | ; |
410 | |
411 | /* Switch back to default Invalidate mode */ |
412 | if (op == OP_FLUSH_N_INV) |
413 | write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH); |
414 | } |
415 | } |
416 | |
417 | /* |
418 | * Operation on Entire D-Cache |
419 | * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV} |
420 | * Note that constant propagation ensures all the checks are gone |
421 | * in generated code |
422 | */ |
423 | static inline void __dc_entire_op(const int op) |
424 | { |
425 | int aux; |
426 | |
427 | __before_dc_op(op); |
428 | |
429 | if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ |
430 | aux = ARC_REG_DC_IVDC; |
431 | else |
432 | aux = ARC_REG_DC_FLSH; |
433 | |
434 | write_aux_reg(aux, 0x1); |
435 | |
436 | __after_dc_op(op); |
437 | } |
438 | |
439 | static inline void __dc_disable(void) |
440 | { |
441 | const int r = ARC_REG_DC_CTRL; |
442 | |
443 | __dc_entire_op(OP_FLUSH_N_INV); |
444 | write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS); |
445 | } |
446 | |
447 | static void __dc_enable(void) |
448 | { |
449 | const int r = ARC_REG_DC_CTRL; |
450 | |
451 | write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS); |
452 | } |
453 | |
454 | /* For kernel mappings cache operation: index is same as paddr */ |
455 | #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) |
456 | |
457 | /* |
458 | * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback) |
459 | */ |
460 | static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr, |
461 | unsigned long sz, const int op) |
462 | { |
463 | const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE; |
464 | unsigned long flags; |
465 | |
466 | local_irq_save(flags); |
467 | |
468 | __before_dc_op(op); |
469 | |
470 | __cache_line_loop(paddr, vaddr, sz, op, full_page); |
471 | |
472 | __after_dc_op(op); |
473 | |
474 | local_irq_restore(flags); |
475 | } |
476 | |
477 | #else |
478 | |
479 | #define __dc_entire_op(op) |
480 | #define __dc_disable() |
481 | #define __dc_enable() |
482 | #define __dc_line_op(paddr, vaddr, sz, op) |
483 | #define __dc_line_op_k(paddr, sz, op) |
484 | |
485 | #endif /* CONFIG_ARC_HAS_DCACHE */ |
486 | |
487 | #ifdef CONFIG_ARC_HAS_ICACHE |
488 | |
489 | static inline void __ic_entire_inv(void) |
490 | { |
491 | write_aux_reg(ARC_REG_IC_IVIC, 1); |
492 | read_aux_reg(ARC_REG_IC_CTRL); /* blocks */ |
493 | } |
494 | |
495 | static inline void |
496 | __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr, |
497 | unsigned long sz) |
498 | { |
499 | const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE; |
500 | unsigned long flags; |
501 | |
502 | local_irq_save(flags); |
503 | (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page); |
504 | local_irq_restore(flags); |
505 | } |
506 | |
507 | #ifndef CONFIG_SMP |
508 | |
509 | #define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s) |
510 | |
511 | #else |
512 | |
513 | struct ic_inv_args { |
514 | phys_addr_t paddr, vaddr; |
515 | int sz; |
516 | }; |
517 | |
518 | static void __ic_line_inv_vaddr_helper(void *info) |
519 | { |
520 | struct ic_inv_args *ic_inv = info; |
521 | |
522 | __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); |
523 | } |
524 | |
525 | static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr, |
526 | unsigned long sz) |
527 | { |
528 | struct ic_inv_args ic_inv = { |
529 | .paddr = paddr, |
530 | .vaddr = vaddr, |
531 | .sz = sz |
532 | }; |
533 | |
534 | on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1); |
535 | } |
536 | |
537 | #endif /* CONFIG_SMP */ |
538 | |
539 | #else /* !CONFIG_ARC_HAS_ICACHE */ |
540 | |
541 | #define __ic_entire_inv() |
542 | #define __ic_line_inv_vaddr(pstart, vstart, sz) |
543 | |
544 | #endif /* CONFIG_ARC_HAS_ICACHE */ |
545 | |
546 | static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op) |
547 | { |
548 | #ifdef CONFIG_ISA_ARCV2 |
549 | /* |
550 | * SLC is shared between all cores and concurrent aux operations from |
551 | * multiple cores need to be serialized using a spinlock |
552 | * A concurrent operation can be silently ignored and/or the old/new |
553 | * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop |
554 | * below) |
555 | */ |
556 | static DEFINE_SPINLOCK(lock); |
557 | unsigned long flags; |
558 | unsigned int ctrl; |
559 | phys_addr_t end; |
560 | |
561 | spin_lock_irqsave(&lock, flags); |
562 | |
563 | /* |
564 | * The Region Flush operation is specified by CTRL.RGN_OP[11..9] |
565 | * - b'000 (default) is Flush, |
566 | * - b'001 is Invalidate if CTRL.IM == 0 |
567 | * - b'001 is Flush-n-Invalidate if CTRL.IM == 1 |
568 | */ |
569 | ctrl = read_aux_reg(ARC_REG_SLC_CTRL); |
570 | |
571 | /* Don't rely on default value of IM bit */ |
572 | if (!(op & OP_FLUSH)) /* i.e. OP_INV */ |
573 | ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */ |
574 | else |
575 | ctrl |= SLC_CTRL_IM; |
576 | |
577 | if (op & OP_INV) |
578 | ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */ |
579 | else |
580 | ctrl &= ~SLC_CTRL_RGN_OP_INV; |
581 | |
582 | write_aux_reg(ARC_REG_SLC_CTRL, ctrl); |
583 | |
584 | /* |
585 | * Lower bits are ignored, no need to clip |
586 | * END needs to be setup before START (latter triggers the operation) |
587 | * END can't be same as START, so add (l2_line_sz - 1) to sz |
588 | */ |
589 | end = paddr + sz + l2_line_sz - 1; |
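	/*
	 * Worked example (hypothetical numbers, 64B SLC line):
	 * paddr = 0x8000_0050, sz = 0x30 (data spans 0x..50 to 0x..7F)
	 *   end = 0x8000_0050 + 0x30 + 63 = 0x8000_00BF
	 * Low bits of START/END are ignored by HW, so the line at 0x..40
	 * holding the data is covered, and the padding guarantees
	 * END != START even for a @sz smaller than one line.
	 */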
590 | if (is_pae40_enabled()) |
591 | write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end)); |
592 | |
593 | write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end)); |
594 | |
595 | if (is_pae40_enabled()) |
596 | write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr)); |
597 | |
598 | write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr)); |
599 | |
	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
601 | read_aux_reg(ARC_REG_SLC_CTRL); |
602 | |
603 | while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); |
604 | |
605 | spin_unlock_irqrestore(&lock, flags); |
606 | #endif |
607 | } |
608 | |
609 | static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op) |
610 | { |
611 | #ifdef CONFIG_ISA_ARCV2 |
612 | /* |
613 | * SLC is shared between all cores and concurrent aux operations from |
614 | * multiple cores need to be serialized using a spinlock |
615 | * A concurrent operation can be silently ignored and/or the old/new |
616 | * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop |
617 | * below) |
618 | */ |
619 | static DEFINE_SPINLOCK(lock); |
620 | |
621 | const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1); |
622 | unsigned int ctrl, cmd; |
623 | unsigned long flags; |
624 | int num_lines; |
625 | |
626 | spin_lock_irqsave(&lock, flags); |
627 | |
628 | ctrl = read_aux_reg(ARC_REG_SLC_CTRL); |
629 | |
630 | /* Don't rely on default value of IM bit */ |
631 | if (!(op & OP_FLUSH)) /* i.e. OP_INV */ |
632 | ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */ |
633 | else |
634 | ctrl |= SLC_CTRL_IM; |
635 | |
636 | write_aux_reg(ARC_REG_SLC_CTRL, ctrl); |
637 | |
638 | cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL; |
639 | |
640 | sz += paddr & ~SLC_LINE_MASK; |
641 | paddr &= SLC_LINE_MASK; |
642 | |
643 | num_lines = DIV_ROUND_UP(sz, l2_line_sz); |
644 | |
645 | while (num_lines-- > 0) { |
646 | write_aux_reg(cmd, paddr); |
647 | paddr += l2_line_sz; |
648 | } |
649 | |
	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
651 | read_aux_reg(ARC_REG_SLC_CTRL); |
652 | |
653 | while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); |
654 | |
655 | spin_unlock_irqrestore(&lock, flags); |
656 | #endif |
657 | } |
658 | |
659 | #define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op) |
660 | |
661 | noinline static void slc_entire_op(const int op) |
662 | { |
663 | unsigned int ctrl, r = ARC_REG_SLC_CTRL; |
664 | |
665 | ctrl = read_aux_reg(r); |
666 | |
667 | if (!(op & OP_FLUSH)) /* i.e. OP_INV */ |
668 | ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */ |
669 | else |
670 | ctrl |= SLC_CTRL_IM; |
671 | |
672 | write_aux_reg(r, ctrl); |
673 | |
674 | if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ |
675 | write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1); |
676 | else |
677 | write_aux_reg(ARC_REG_SLC_FLUSH, 0x1); |
678 | |
	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
680 | read_aux_reg(r); |
681 | |
682 | /* Important to wait for flush to complete */ |
683 | while (read_aux_reg(r) & SLC_CTRL_BUSY); |
684 | } |
685 | |
686 | static inline void arc_slc_disable(void) |
687 | { |
688 | const int r = ARC_REG_SLC_CTRL; |
689 | |
690 | slc_entire_op(OP_FLUSH_N_INV); |
691 | write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS); |
692 | } |
693 | |
694 | static inline void arc_slc_enable(void) |
695 | { |
696 | const int r = ARC_REG_SLC_CTRL; |
697 | |
698 | write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS); |
699 | } |
700 | |
701 | /*********************************************************** |
702 | * Exported APIs |
703 | */ |
704 | |
705 | void flush_dcache_folio(struct folio *folio) |
706 | { |
707 | clear_bit(PG_dc_clean, &folio->flags); |
709 | } |
710 | EXPORT_SYMBOL(flush_dcache_folio); |
711 | |
712 | void flush_dcache_page(struct page *page) |
713 | { |
	flush_dcache_folio(page_folio(page));
715 | } |
716 | EXPORT_SYMBOL(flush_dcache_page); |
717 | |
718 | /* |
719 | * DMA ops for systems with L1 cache only |
720 | * Make memory coherent with L1 cache by flushing/invalidating L1 lines |
721 | */ |
722 | static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz) |
723 | { |
724 | __dc_line_op_k(start, sz, OP_FLUSH_N_INV); |
725 | } |
726 | |
727 | static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz) |
728 | { |
729 | __dc_line_op_k(start, sz, OP_INV); |
730 | } |
731 | |
732 | static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz) |
733 | { |
734 | __dc_line_op_k(start, sz, OP_FLUSH); |
735 | } |
736 | |
737 | /* |
738 | * DMA ops for systems with both L1 and L2 caches, but without IOC |
739 | * Both L1 and L2 lines need to be explicitly flushed/invalidated |
740 | */ |
741 | static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz) |
742 | { |
743 | __dc_line_op_k(start, sz, OP_FLUSH_N_INV); |
744 | slc_op(start, sz, OP_FLUSH_N_INV); |
745 | } |
746 | |
747 | static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz) |
748 | { |
749 | __dc_line_op_k(start, sz, OP_INV); |
750 | slc_op(start, sz, OP_INV); |
751 | } |
752 | |
753 | static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz) |
754 | { |
755 | __dc_line_op_k(start, sz, OP_FLUSH); |
756 | slc_op(start, sz, OP_FLUSH); |
757 | } |
758 | |
759 | /* |
760 | * Exported DMA API |
761 | */ |
762 | void dma_cache_wback_inv(phys_addr_t start, unsigned long sz) |
763 | { |
764 | __dma_cache_wback_inv(start, sz); |
765 | } |
766 | EXPORT_SYMBOL(dma_cache_wback_inv); |
767 | |
768 | void dma_cache_inv(phys_addr_t start, unsigned long sz) |
769 | { |
770 | __dma_cache_inv(start, sz); |
771 | } |
772 | EXPORT_SYMBOL(dma_cache_inv); |
773 | |
774 | void dma_cache_wback(phys_addr_t start, unsigned long sz) |
775 | { |
776 | __dma_cache_wback(start, sz); |
777 | } |
778 | EXPORT_SYMBOL(dma_cache_wback); |
779 | |
780 | /* |
781 | * This is API for making I/D Caches consistent when modifying |
782 | * kernel code (loadable modules, kprobes, kgdb...) |
783 | * This is called on insmod, with kernel virtual address for CODE of |
784 | * the module. ARC cache maintenance ops require PHY address thus we |
785 | * need to convert vmalloc addr to PHY addr |
786 | */ |
787 | void flush_icache_range(unsigned long kstart, unsigned long kend) |
788 | { |
789 | unsigned int tot_sz; |
790 | |
	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
792 | |
793 | /* Shortcut for bigger flush ranges. |
794 | * Here we don't care if this was kernel virtual or phy addr |
795 | */ |
796 | tot_sz = kend - kstart; |
797 | if (tot_sz > PAGE_SIZE) { |
798 | flush_cache_all(); |
799 | return; |
800 | } |
801 | |
802 | /* Case: Kernel Phy addr (0x8000_0000 onwards) */ |
803 | if (likely(kstart > PAGE_OFFSET)) { |
804 | /* |
805 | * The 2nd arg despite being paddr will be used to index icache |
806 | * This is OK since no alternate virtual mappings will exist |
807 | * given the callers for this case: kprobe/kgdb in built-in |
808 | * kernel code only. |
809 | */ |
810 | __sync_icache_dcache(kstart, kstart, kend - kstart); |
811 | return; |
812 | } |
813 | |
814 | /* |
815 | * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff) |
816 | * (1) ARC Cache Maintenance ops only take Phy addr, hence special |
817 | * handling of kernel vaddr. |
818 | * |
819 | * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already), |
820 | * it still needs to handle a 2 page scenario, where the range |
821 | * straddles across 2 virtual pages and hence need for loop |
822 | */ |
823 | while (tot_sz > 0) { |
824 | unsigned int off, sz; |
825 | unsigned long phy, pfn; |
826 | |
827 | off = kstart % PAGE_SIZE; |
		pfn = vmalloc_to_pfn((void *)kstart);
829 | phy = (pfn << PAGE_SHIFT) + off; |
830 | sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off); |
831 | __sync_icache_dcache(phy, kstart, sz); |
832 | kstart += sz; |
833 | tot_sz -= sz; |
834 | } |
835 | } |
836 | EXPORT_SYMBOL(flush_icache_range); |
837 | |
838 | /* |
839 | * General purpose helper to make I and D cache lines consistent. |
840 | * @paddr is phy addr of region |
841 | * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc) |
842 | * However in one instance, when called by kprobe (for a breakpt in |
843 | * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will |
844 | * use a paddr to index the cache (despite VIPT). This is fine since a |
845 | * builtin kernel page will not have any virtual mappings. |
846 | * kprobe on loadable module will be kernel vaddr. |
847 | */ |
848 | void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len) |
849 | { |
850 | __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV); |
851 | __ic_line_inv_vaddr(paddr, vaddr, len); |
852 | } |
853 | |
854 | /* wrapper to compile time eliminate alignment checks in flush loop */ |
855 | void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr) |
856 | { |
857 | __ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE); |
858 | } |
859 | |
860 | /* |
861 | * wrapper to clearout kernel or userspace mappings of a page |
862 | * For kernel mappings @vaddr == @paddr |
863 | */ |
864 | void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr) |
865 | { |
866 | __dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV); |
867 | } |
868 | |
869 | noinline void flush_cache_all(void) |
870 | { |
871 | unsigned long flags; |
872 | |
873 | local_irq_save(flags); |
874 | |
875 | __ic_entire_inv(); |
876 | __dc_entire_op(OP_FLUSH_N_INV); |
877 | |
	local_irq_restore(flags);
}
881 | |
882 | void copy_user_highpage(struct page *to, struct page *from, |
883 | unsigned long u_vaddr, struct vm_area_struct *vma) |
884 | { |
885 | struct folio *src = page_folio(from); |
886 | struct folio *dst = page_folio(to); |
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);

	copy_page(kto, kfrom);
891 | |
892 | clear_bit(PG_dc_clean, &dst->flags); |
893 | clear_bit(PG_dc_clean, &src->flags); |
894 | |
895 | kunmap_atomic(kto); |
896 | kunmap_atomic(kfrom); |
897 | } |
898 | |
899 | void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) |
900 | { |
901 | struct folio *folio = page_folio(page); |
	clear_page(to);
903 | clear_bit(PG_dc_clean, &folio->flags); |
904 | } |
905 | EXPORT_SYMBOL(clear_user_page); |
906 | |
907 | /********************************************************************** |
908 | * Explicit Cache flush request from user space via syscall |
909 | * Needed for JITs which generate code on the fly |
910 | */ |
911 | SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags) |
912 | { |
913 | /* TBD: optimize this */ |
914 | flush_cache_all(); |
915 | return 0; |
916 | } |
917 | |
918 | /* |
919 | * IO-Coherency (IOC) setup rules: |
920 | * |
921 | * 1. Needs to be at system level, so only once by Master core |
922 | * Non-Masters need not be accessing caches at that time |
923 | * - They are either HALT_ON_RESET and kick started much later or |
924 | * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot() |
925 | * doesn't perturb caches or coherency unit |
926 | * |
927 | * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC, |
928 | * otherwise any straggler data might behave strangely post IOC enabling |
929 | * |
930 | * 3. All Caches need to be disabled when setting up IOC to elide any in-flight |
931 | * Coherency transactions |
932 | */ |
933 | static noinline void __init arc_ioc_setup(void) |
934 | { |
935 | unsigned int ioc_base, mem_sz; |
936 | |
937 | /* |
938 | * If IOC was already enabled (due to bootloader) it technically needs to |
939 | * be reconfigured with aperture base,size corresponding to Linux memory map |
940 | * which will certainly be different than uboot's. But disabling and |
941 | * reenabling IOC when DMA might be potentially active is tricky business. |
942 | * To avoid random memory issues later, just panic here and ask user to |
943 | * upgrade bootloader to one which doesn't enable IOC |
944 | */ |
945 | if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT) |
946 | panic(fmt: "IOC already enabled, please upgrade bootloader!\n" ); |
947 | |
948 | if (!ioc_enable) |
949 | return; |
950 | |
951 | /* Flush + invalidate + disable L1 dcache */ |
952 | __dc_disable(); |
953 | |
954 | /* Flush + invalidate SLC */ |
955 | if (read_aux_reg(ARC_REG_SLC_BCR)) |
956 | slc_entire_op(OP_FLUSH_N_INV); |
957 | |
958 | /* |
959 | * currently IOC Aperture covers entire DDR |
960 | * TBD: fix for PGU + 1GB of low mem |
961 | * TBD: fix for PAE |
962 | */ |
963 | mem_sz = arc_get_mem_sz(); |
964 | |
	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");
967 | |
968 | /* |
969 | * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB, |
970 | * so setting 0x11 implies 512MB, 0x12 implies 1GB... |
971 | */ |
972 | write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2); |
973 | |
974 | /* for now assume kernel base is start of IOC aperture */ |
975 | ioc_base = CONFIG_LINUX_RAM_BASE; |
976 | |
977 | if (ioc_base % mem_sz != 0) |
978 | panic(fmt: "IOC Aperture start must be aligned to the size of the aperture" ); |
979 | |
980 | write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12); |
981 | write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT); |
982 | write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT); |
983 | |
984 | /* Re-enable L1 dcache */ |
985 | __dc_enable(); |
986 | } |
987 | |
988 | /* |
989 | * Cache related boot time checks/setups only needed on master CPU: |
990 | * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES) |
991 | * Assume SMP only, so all cores will have same cache config. A check on |
992 | * one core suffices for all |
993 | * - IOC setup / dma callbacks only need to be done once |
994 | */ |
995 | static noinline void __init arc_cache_init_master(void) |
996 | { |
997 | if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) { |
998 | struct cpuinfo_arc_cache *ic = &ic_info; |
999 | |
1000 | if (!ic->line_len) |
1001 | panic(fmt: "cache support enabled but non-existent cache\n" ); |
1002 | |
1003 | if (ic->line_len != L1_CACHE_BYTES) |
1004 | panic(fmt: "ICache line [%d] != kernel Config [%d]" , |
1005 | ic->line_len, L1_CACHE_BYTES); |
1006 | |
1007 | /* |
1008 | * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG |
1009 | * pair to provide vaddr/paddr respectively, just as in MMU v3 |
1010 | */ |
1011 | if (is_isa_arcv2() && ic->colors > 1) |
1012 | _cache_line_loop_ic_fn = __cache_line_loop_v3; |
1013 | else |
1014 | _cache_line_loop_ic_fn = __cache_line_loop; |
1015 | } |
1016 | |
1017 | if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) { |
1018 | struct cpuinfo_arc_cache *dc = &dc_info; |
1019 | |
1020 | if (!dc->line_len) |
1021 | panic(fmt: "cache support enabled but non-existent cache\n" ); |
1022 | |
1023 | if (dc->line_len != L1_CACHE_BYTES) |
1024 | panic(fmt: "DCache line [%d] != kernel Config [%d]" , |
1025 | dc->line_len, L1_CACHE_BYTES); |
1026 | |
1027 | /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */ |
1028 | if (is_isa_arcompact() && dc->colors > 1) { |
1029 | panic(fmt: "Aliasing VIPT cache not supported\n" ); |
1030 | } |
1031 | } |
1032 | |
1033 | /* |
1034 | * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger |
1035 | * or equal to any cache line length. |
1036 | */ |
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");
1039 | if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES)) |
1040 | panic(fmt: "L2 Cache line [%d] > kernel Config [%d]\n" , |
1041 | l2_line_sz, SMP_CACHE_BYTES); |
1042 | |
1043 | /* Note that SLC disable not formally supported till HS 3.0 */ |
1044 | if (is_isa_arcv2() && l2_line_sz && !slc_enable) |
1045 | arc_slc_disable(); |
1046 | |
1047 | if (is_isa_arcv2() && ioc_exists) |
1048 | arc_ioc_setup(); |
1049 | |
1050 | if (is_isa_arcv2() && l2_line_sz && slc_enable) { |
1051 | __dma_cache_wback_inv = __dma_cache_wback_inv_slc; |
1052 | __dma_cache_inv = __dma_cache_inv_slc; |
1053 | __dma_cache_wback = __dma_cache_wback_slc; |
1054 | } else { |
1055 | __dma_cache_wback_inv = __dma_cache_wback_inv_l1; |
1056 | __dma_cache_inv = __dma_cache_inv_l1; |
1057 | __dma_cache_wback = __dma_cache_wback_l1; |
1058 | } |
1059 | /* |
1060 | * In case of IOC (say IOC+SLC case), pointers above could still be set |
1061 | * but end up not being relevant as the first function in chain is not |
1062 | * called at all for devices using coherent DMA. |
1063 | * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() |
1064 | */ |
1065 | } |
1066 | |
1067 | void __ref arc_cache_init(void) |
1068 | { |
1069 | unsigned int __maybe_unused cpu = smp_processor_id(); |
1070 | |
1071 | if (!cpu) |
1072 | arc_cache_init_master(); |
1073 | |
1074 | /* |
1075 | * In PAE regime, TLB and cache maintenance ops take wider addresses |
1076 | * And even if PAE is not enabled in kernel, the upper 32-bits still need |
1077 | * to be zeroed to keep the ops sane. |
1078 | * As an optimization for more common !PAE enabled case, zero them out |
1079 | * once at init, rather than checking/setting to 0 for every runtime op |
1080 | */ |
1081 | if (is_isa_arcv2() && pae40_exist_but_not_enab()) { |
1082 | |
1083 | if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) |
1084 | write_aux_reg(ARC_REG_IC_PTAG_HI, 0); |
1085 | |
1086 | if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) |
1087 | write_aux_reg(ARC_REG_DC_PTAG_HI, 0); |
1088 | |
1089 | if (l2_line_sz) { |
1090 | write_aux_reg(ARC_REG_SLC_RGN_END1, 0); |
1091 | write_aux_reg(ARC_REG_SLC_RGN_START1, 0); |
1092 | } |
1093 | } |
1094 | } |
1095 | |