1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * Copyright (C) 2013 Imagination Technologies |
4 | * Author: Paul Burton <paul.burton@mips.com> |
5 | */ |
6 | |
7 | #include <linux/init.h> |
8 | #include <asm/addrspace.h> |
9 | #include <asm/asm.h> |
10 | #include <asm/asm-offsets.h> |
11 | #include <asm/asmmacro.h> |
12 | #include <asm/cacheops.h> |
13 | #include <asm/eva.h> |
14 | #include <asm/mipsregs.h> |
15 | #include <asm/mipsmtregs.h> |
16 | #include <asm/pm.h> |
17 | #include <asm/smp-cps.h> |
18 | |
19 | #define GCR_CPC_BASE_OFS 0x0088 |
20 | #define GCR_CL_COHERENCE_OFS 0x2008 |
21 | #define GCR_CL_ID_OFS 0x2028 |
22 | |
23 | #define CPC_CL_VC_STOP_OFS 0x2020 |
24 | #define CPC_CL_VC_RUN_OFS 0x2028 |
25 | |
26 | .extern mips_cm_base |
27 | |
28 | .set noreorder |
29 | |
30 | #ifdef CONFIG_64BIT |
31 | # define STATUS_BITDEPS ST0_KX |
32 | #else |
33 | # define STATUS_BITDEPS 0 |
34 | #endif |
35 | |
#ifdef CONFIG_MIPS_CPS_NS16550

/*
 * DUMP_EXCEP(name) - dump the name of the exception vector taken.
 *
 * Loads a0 with the address of the name string & calls
 * mips_cps_bev_dump to report it via the debug UART. The string
 * itself is expected to be emitted at local label 8 by the TEXT()
 * macro (hence the 8f reference) - see asm/asm.h. Expands to nothing
 * when the NS16550 debug UART is not configured.
 */
#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */
49 | |
/*
 * has_mt dest, nomt
 *
 * Set dest to non-zero if the core supports the MT ASE, else zero. If
 * MT is not supported then branch to nomt.
 *
 * Walks the Config register chain: each Config register's top (M) bit,
 * tested here via bgez (bit 31 clear => branch), indicates whether the
 * next Config register exists. Config1.M and Config2.M must be set
 * before Config3 - which holds the MT bit - may be read.
 * Clobbers dest only; the mfc0s after bgez sit in the branch delay
 * slots and are harmless when the branch is taken.
 */
.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt		/* Config1.M clear: no Config2 */
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt		/* Config2.M clear: no Config3 */
	mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt		/* Config3.MT clear: no MT ASE */
	nop
.endm
64 | |
/*
 * has_vp dest, nomt
 *
 * Set dest to non-zero if the core supports MIPSr6 multithreading
 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
 * branch to nomt.
 *
 * Same Config-chain walk as has_mt, but continues through Config4.M to
 * reach Config5, which holds the VP bit. Clobbers dest only.
 */
.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt		/* Config1.M clear: no Config2 */
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt		/* Config2.M clear: no Config3 */
	mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt		/* Config3.M clear: no Config4 */
	mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt		/* Config4.M clear: no Config5 */
	mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt		/* Config5.VP clear: no VPs */
	nop
.endm
84 | |
85 | |
/*
 * mips_cps_core_boot - entry point for cores/VPEs started via CPS SMP.
 *
 * Initialises the L1 caches & joins the coherent domain if the core
 * did not come up coherent, applies the requested Kseg0 CCA, performs
 * EVA & core-level setup, then jumps to the PC recorded in this VPE's
 * struct vpe_boot_config. Does not return.
 *
 * In:	a0 = Cache Coherency Attribute to use for Kseg0
 *	a1 = base address of the CM GCRs
 * Uses:	s0 = saved CCA, s1 = saved GCR base,
 *	s7 = non-zero if the core started up already coherent
 *	(NOTE(review): s7 is only written when MIPS_ISA_REV > 0 -
 *	presumably CPS systems are always r2+; confirm.)
 */
LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0
	move	s1, a1

	/* We don't know how to do coherence setup on earlier ISA */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain (set all coherence domain bits) */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0: ori/xori clears Config.K0, then OR in CCA */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 so we execute with the new CCA in effect */
	PTR_LA	t0, 1f
	jr	t0
	nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers (v0 = core cfg, v1 = VPE cfg, t9 = VPE ID) */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 * a0 = struct core_boot_config (from v0), a1 = this VPE's ID (from t9).
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	move	a0, v0			/* delay slot */

	/* Off we go! Load PC/gp/sp from this VPE's struct vpe_boot_config */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
END(mips_cps_core_boot)
154 | |
	__INIT
/*
 * Boot-time exception vector stubs. Taking any of these exceptions
 * during CPS core bringup is fatal: each optionally dumps its name via
 * the debug UART (DUMP_EXCEP expands to nothing without
 * CONFIG_MIPS_CPS_NS16550) & then spins forever on a branch-to-self.
 * The EJTAG debug exception is the one exception - it is forwarded to
 * the kernel's real handler.
 */
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill" )
	b	.			/* hang here forever */
	nop
END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill" )
	b	.			/* hang here forever */
	nop
END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache" )
	b	.			/* hang here forever */
	nop
END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General" )
	b	.			/* hang here forever */
	nop
END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt" )
	b	.			/* hang here forever */
	nop
END(excep_intex)

LEAF(excep_ejtag)
	/* Chain to the kernel's EJTAG debug exception handler */
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	nop
END(excep_ejtag)
	__FINIT
192 | |
/*
 * mips_cps_core_init - perform core-level MT initialisation.
 *
 * With CONFIG_MIPS_MT_SMP, on a core implementing the MT ASE: restrict
 * execution to a single TC & VPE, then place every other VPE (1..n-1)
 * into a halted, non-active state with a 1:1 TC:VPE binding, ready to
 * be started later by mips_cps_boot_vpes. A no-op on cores without MT
 * or when CONFIG_MIPS_MT_SMP is disabled.
 *
 * Clobbers (MT path): t0, t1, ta1, ta3
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1			/* clear instruction hazards from dmt/dvpe */
	nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core (ta3 = PVPE + 1) */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core, starting from VPE 1 */
	li	ta1, 1

1:	/* Operate on the appropriate TC: select it via VPEControl (TargTC = ta1) */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE: loop while ta1 < number of VPEs */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	nop
END(mips_cps_core_init)
265 | |
/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * In:	s1 = base address of the CM GCRs (saved by mips_cps_core_boot)
 * Returns: pointer to this core's struct core_boot_config in v0,
 *	    pointer to this VPE's struct vpe_boot_config in v1,
 *	    VPE ID in t9
 * Clobbers: t0, t1, t2, ta2, ta3
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this core's struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)		/* t0 = this core's ID */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1			/* t0 = byte offset into the array */
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)			/* t1 = mips_cps_core_bootcfg */
	PTR_ADDU v0, t0, t1

	/* Calculate this VPE's ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1			/* t1 = number of VPEs */

	/*
	 * Calculate a mask for the VPE ID from EBase.CPUNum:
	 * mask = (1 << floor(log2(nvpes))) - 1, exact when the VPE count
	 * is a power of two (presumably true of all real cores - confirm
	 * if an odd VPE count ever appears).
	 */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1			/* t1 = floor(log2(nvpes)) */
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, CP0_EBASE			/* CP0 register 15 select 1 */
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPE's struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	nop
END(mips_cps_get_bootcfg)
324 | |
/*
 * mips_cps_boot_vpes - start sibling VPs/VPEs, stop this one if needed.
 *
 * Start every VP/VPE within the current core whose bit is set in the
 * core's VPE mask, & stop (MIPSR6 VPs, via the CPC) or halt (MT ASE
 * VPEs) the calling one if its own bit is clear. A no-op on cores
 * without multithreading support.
 *
 * In:	a0 = this core's struct core_boot_config
 *	a1 = the calling VPE's ID within the core
 */
LEAF(mips_cps_boot_vpes)
	/* ta2 = mask of VPEs which should be online */
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	/* ta3 = this core's array of struct vpe_boot_config */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff			/* strip the low non-address bits */
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2			/* access the CPC uncached */

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1				/* clear instruction hazards from dvpe */
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE: ta1 = VPE index, ta2 = remaining mask bits */
	move	t8, ta2				/* t8 = full mask, kept for the self-check below */
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC: clear TargTC field, then set it to ta1 */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted (it doesn't need starting) */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPE's struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt (clear IXMT, set A) */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE: shift the mask down & advance the index */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE (bit a1 of the saved mask) is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0				/* spin until the halt takes effect */
	nop

2:

#endif /* CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	nop
END(mips_cps_boot_vpes)
488 | |
#if MIPS_ISA_REV > 0
/*
 * mips_cps_cache_init - initialise the L1 caches of the current core.
 *
 * Probes the I- & D-cache geometry from Config1 & writes zeroed tags
 * (Index_Store_Tag) to every cache index. Called from
 * mips_cps_core_boot before the core joins the coherent domain.
 *
 * Clobbers: a0, a1, t0-t3, v0
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size: 2 << IL bytes; IL == 0 means no I-cache */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	li	t1, 2			/* delay slot - harmless when branch taken */
	sllv	t0, t1, t0

	/*
	 * Detect I-cache sets per way: 32 << (IS + 1) for IS != 7.
	 * NOTE(review): when IS == 7 the branch leaves t1 == 7 rather than
	 * the 32 sets that encoding denotes, under-indexing the cache -
	 * presumably no supported core uses IS == 7; confirm.
	 */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32			/* delay slot - always executed */
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1		/* t2 = associativity (ways) */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total I-cache bytes */

	/* Store zeroed tags to every I-cache index in [CKSEG0, CKSEG0 + size) */
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size: 2 << DL bytes; DL == 0 means no D-cache */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	li	t1, 2			/* delay slot - harmless when branch taken */
	sllv	t0, t1, t0

	/* Detect D-cache sets per way (same IS == 7 caveat as the I-cache) */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32			/* delay slot - always executed */
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1		/* t2 = associativity (ways) */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total D-cache bytes */

	/*
	 * Store zeroed tags to every D-cache index. a1 holds the LAST
	 * index (base + size - line) since the compare happens before
	 * the increment in the delay slot; coverage matches the I-cache
	 * loop above.
	 */
	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0		/* delay slot */
dcache_done:

	jr	ra
	nop
END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */
564 | |
565 | #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) |
566 | |
/*
 * psstate dest - calculate a pointer to this CPU's struct
 * mips_static_suspend_state (the per-CPU cps_cpu_state variable).
 *
 * dest = &cps_cpu_state + __per_cpu_offset[cpu]. Uses the assembler
 * temporary $1 as scratch (hence noat). Assumes gp points at the
 * current thread_info so TI_CPU(gp) yields the CPU number - TODO
 * confirm against the callers' register state.
 */
.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)		/* $1 = this CPU's number */
	sll	$1, $1, LONGLOG		/* scale to a long-sized array index */
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)		/* $1 = __per_cpu_offset[cpu] */
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1	/* dest = per-CPU cps_cpu_state */
	.set	pop
.endm
580 | |
/*
 * mips_cps_pm_save - save this CPU's context for power management.
 *
 * Saves register state via SUSPEND_SAVE_REGS, then the static state
 * into the per-CPU struct located by psstate (t1), & finally jumps to
 * the address in v0 - presumably a continuation set up by the
 * SUSPEND_* macros in asm/pm.h; confirm there.
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	nop
END(mips_cps_pm_save)
589 | |
/*
 * mips_cps_pm_restore - restore the context saved by mips_cps_pm_save.
 *
 * Locates this CPU's per-CPU suspend state via psstate (t1), restores
 * the static state, then restores registers & returns to the original
 * caller via RESUME_RESTORE_REGS_RETURN (see asm/pm.h).
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
END(mips_cps_pm_restore)
596 | |
597 | #endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */ |
598 | |