1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Copyright (C) 2014 Imagination Technologies |
4 | * Author: Paul Burton <paul.burton@mips.com> |
5 | */ |
6 | |
7 | #include <linux/cpuhotplug.h> |
8 | #include <linux/init.h> |
9 | #include <linux/percpu.h> |
10 | #include <linux/slab.h> |
11 | #include <linux/suspend.h> |
12 | |
13 | #include <asm/asm-offsets.h> |
14 | #include <asm/cacheflush.h> |
15 | #include <asm/cacheops.h> |
16 | #include <asm/idle.h> |
17 | #include <asm/mips-cps.h> |
18 | #include <asm/mipsmtregs.h> |
19 | #include <asm/pm.h> |
20 | #include <asm/pm-cps.h> |
21 | #include <asm/regdef.h> |
22 | #include <asm/smp-cps.h> |
23 | #include <asm/uasm.h> |
24 | |
25 | /* |
26 | * cps_nc_entry_fn - type of a generated non-coherent state entry function |
27 | * @online: the count of online coupled VPEs |
28 | * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count |
29 | * |
30 | * The code entering & exiting non-coherent states is generated at runtime |
31 | * using uasm, in order to ensure that the compiler cannot insert a stray |
32 | * memory access at an unfortunate time and to allow the generation of optimal |
33 | * core-specific code particularly for cache routines. If coupled_coherence |
34 | * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state, |
35 | * returns the number of VPEs that were in the wait state at the point this |
36 | * VPE left it. Returns garbage if coupled_coherence is zero or this is not |
37 | * the entry function for CPS_PM_NC_WAIT. |
38 | */ |
39 | typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count); |
40 | |
41 | /* |
42 | * The entry point of the generated non-coherent idle state entry/exit |
43 | * functions. Actually per-core rather than per-CPU. |
44 | */ |
45 | static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], |
46 | nc_asm_enter); |
47 | |
48 | /* Bitmap indicating which states are supported by the system */ |
49 | static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); |
50 | |
51 | /* |
52 | * Indicates the number of coupled VPEs ready to operate in a non-coherent |
53 | * state. Actually per-core rather than per-CPU. |
54 | */ |
55 | static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); |
56 | |
57 | /* Indicates online CPUs coupled with the current CPU */ |
58 | static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); |
59 | |
60 | /* |
61 | * Used to synchronize entry to deep idle states. Actually per-core rather |
62 | * than per-CPU. |
63 | */ |
64 | static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); |
65 | |
66 | /* Saved CPU state across the CPS_PM_POWER_GATED state */ |
67 | DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); |
68 | |
69 | /* A somewhat arbitrary number of labels & relocs for uasm */ |
70 | static struct uasm_label labels[32]; |
71 | static struct uasm_reloc relocs[32]; |
72 | |
73 | bool cps_pm_support_state(enum cps_pm_state state) |
74 | { |
75 | return test_bit(state, state_support); |
76 | } |
77 | |
78 | static void coupled_barrier(atomic_t *a, unsigned online) |
79 | { |
80 | /* |
81 | * This function is effectively the same as |
82 | * cpuidle_coupled_parallel_barrier, which can't be used here since |
83 | * there's no cpuidle device. |
84 | */ |
85 | |
86 | if (!coupled_coherence) |
87 | return; |
88 | |
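	/*
	 * Rendezvous in two phases: each online coupled VPE increments the
	 * counter until it reaches 'online', then increments it again. The
	 * last VPE to arrive (count == online * 2) resets the counter for
	 * the next use; the others spin until that reset becomes visible.
	 */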
89 | smp_mb__before_atomic(); |
	atomic_inc(a);

	while (atomic_read(a) < online)
		cpu_relax();

	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > online)
101 | cpu_relax(); |
102 | } |
103 | |
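/*
 * cps_pm_enter_state - enter & exit the given idle state on the calling CPU
 *
 * Returns -EINVAL if no entry code has been generated for the state (or, for
 * CPS_PM_POWER_GATED, if CPS SMP is not in use), otherwise 0 once the state
 * has been exited. As a sketch of typical usage, a cpuidle driver's ->enter()
 * callback (with interrupts already disabled) might simply do:
 *
 *	cps_pm_enter_state(CPS_PM_NC_WAIT);
 */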
104 | int cps_pm_enter_state(enum cps_pm_state state) |
105 | { |
106 | unsigned cpu = smp_processor_id(); |
	unsigned core = cpu_core(&current_cpu_data);
108 | unsigned online, left; |
109 | cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); |
110 | u32 *core_ready_count, *nc_core_ready_count; |
111 | void *nc_addr; |
112 | cps_nc_entry_fn entry; |
113 | struct core_boot_config *core_cfg; |
114 | struct vpe_boot_config *vpe_cfg; |
115 | |
116 | /* Check that there is an entry function for this state */ |
117 | entry = per_cpu(nc_asm_enter, core)[state]; |
118 | if (!entry) |
119 | return -EINVAL; |
120 | |
121 | /* Calculate which coupled CPUs (VPEs) are online */ |
122 | #if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6) |
123 | if (cpu_online(cpu)) { |
124 | cpumask_and(coupled_mask, cpu_online_mask, |
125 | &cpu_sibling_map[cpu]); |
126 | online = cpumask_weight(coupled_mask); |
127 | cpumask_clear_cpu(cpu, coupled_mask); |
128 | } else |
129 | #endif |
130 | { |
		cpumask_clear(coupled_mask);
132 | online = 1; |
133 | } |
134 | |
135 | /* Setup the VPE to run mips_cps_pm_restore when started again */ |
136 | if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { |
137 | /* Power gating relies upon CPS SMP */ |
138 | if (!mips_cps_smp_in_use()) |
139 | return -EINVAL; |
140 | |
141 | core_cfg = &mips_cps_core_bootcfg[core]; |
		vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
143 | vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; |
144 | vpe_cfg->gp = (unsigned long)current_thread_info(); |
145 | vpe_cfg->sp = 0; |
146 | } |
147 | |
148 | /* Indicate that this CPU might not be coherent */ |
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
150 | smp_mb__after_atomic(); |
151 | |
152 | /* Create a non-coherent mapping of the core ready_count */ |
153 | core_ready_count = per_cpu(ready_count, core); |
154 | nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), |
155 | (unsigned long)core_ready_count); |
156 | nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); |
157 | nc_core_ready_count = nc_addr; |
158 | |
159 | /* Ensure ready_count is zero-initialised before the assembly runs */ |
160 | WRITE_ONCE(*nc_core_ready_count, 0); |
	coupled_barrier(&per_cpu(pm_barrier, core), online);
162 | |
163 | /* Run the generated entry code */ |
164 | left = entry(online, nc_core_ready_count); |
165 | |
166 | /* Remove the non-coherent mapping of ready_count */ |
167 | kunmap_noncoherent(); |
168 | |
169 | /* Indicate that this CPU is definitely coherent */ |
	cpumask_set_cpu(cpu, &cpu_coherent_mask);
171 | |
172 | /* |
173 | * If this VPE is the first to leave the non-coherent wait state then |
174 | * it needs to wake up any coupled VPEs still running their wait |
175 | * instruction so that they return to cpuidle, which can then complete |
176 | * coordination between the coupled VPEs & provide the governor with |
177 | * a chance to reflect on the length of time the VPEs were in the |
178 | * idle state. |
179 | */ |
180 | if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online)) |
		arch_send_call_function_ipi_mask(coupled_mask);
182 | |
183 | return 0; |
184 | } |
185 | |
186 | static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, |
187 | struct uasm_reloc **pr, |
188 | const struct cache_desc *cache, |
189 | unsigned op, int lbl) |
190 | { |
191 | unsigned cache_size = cache->ways << cache->waybit; |
192 | unsigned i; |
193 | const unsigned unroll_lines = 32; |
194 | |
195 | /* If the cache isn't present this function has it easy */ |
196 | if (cache->flags & MIPS_CACHE_NOT_PRESENT) |
197 | return; |
198 | |
199 | /* Load base address */ |
200 | UASM_i_LA(pp, GPR_T0, (long)CKSEG0); |
201 | |
202 | /* Calculate end address */ |
203 | if (cache_size < 0x8000) |
204 | uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size); |
205 | else |
206 | UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size)); |
207 | |
208 | /* Start of cache op loop */ |
209 | uasm_build_label(pl, *pp, lbl); |
210 | |
211 | /* Generate the cache ops */ |
212 | for (i = 0; i < unroll_lines; i++) { |
213 | if (cpu_has_mips_r6) { |
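			/*
			 * MIPSr6 shrank the CACHE instruction's offset field,
			 * so advance the base address after each op rather
			 * than relying on large immediate offsets.
			 */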
214 | uasm_i_cache(pp, op, 0, GPR_T0); |
215 | uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz); |
216 | } else { |
217 | uasm_i_cache(pp, op, i * cache->linesz, GPR_T0); |
218 | } |
219 | } |
220 | |
221 | if (!cpu_has_mips_r6) |
222 | /* Update the base address */ |
223 | uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz); |
224 | |
225 | /* Loop if we haven't reached the end address yet */ |
226 | uasm_il_bne(pp, pr, GPR_T0, GPR_T1, lbl); |
227 | uasm_i_nop(pp); |
228 | } |
229 | |
230 | static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, |
231 | struct uasm_reloc **pr, |
232 | const struct cpuinfo_mips *cpu_info, |
233 | int lbl) |
234 | { |
235 | unsigned i, fsb_size = 8; |
236 | unsigned num_loads = (fsb_size * 3) / 2; |
237 | unsigned line_stride = 2; |
238 | unsigned line_size = cpu_info->dcache.linesz; |
239 | unsigned perf_counter, perf_event; |
240 | unsigned revision = cpu_info->processor_id & PRID_REV_MASK; |
241 | |
242 | /* |
243 | * Determine whether this CPU requires an FSB flush, and if so which |
244 | * performance counter/event reflect stalls due to a full FSB. |
245 | */ |
246 | switch (__get_cpu_type(cpu_info->cputype)) { |
247 | case CPU_INTERAPTIV: |
248 | perf_counter = 1; |
249 | perf_event = 51; |
250 | break; |
251 | |
252 | case CPU_PROAPTIV: |
253 | /* Newer proAptiv cores don't require this workaround */ |
254 | if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) |
255 | return 0; |
256 | |
257 | /* On older ones it's unavailable */ |
258 | return -1; |
259 | |
260 | default: |
261 | /* Assume that the CPU does not need this workaround */ |
262 | return 0; |
263 | } |
264 | |
265 | /* |
266 | * Ensure that the fill/store buffer (FSB) is not holding the results |
267 | * of a prefetch, since if it is then the CPC sequencer may become |
268 | * stuck in the D3 (ClrBus) state whilst entering a low power state. |
269 | */ |
270 | |
271 | /* Preserve perf counter setup */ |
272 | uasm_i_mfc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ |
273 | uasm_i_mfc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ |
274 | |
275 | /* Setup perf counter to count FSB full pipeline stalls */ |
276 | uasm_i_addiu(pp, GPR_T0, GPR_ZERO, (perf_event << 5) | 0xf); |
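	/* event select field starts at bit 5; 0xf counts in U/S/K/EXL modes */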
277 | uasm_i_mtc0(pp, GPR_T0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ |
278 | uasm_i_ehb(pp); |
279 | uasm_i_mtc0(pp, GPR_ZERO, 25, (perf_counter * 2) + 1); /* PerfCntN */ |
280 | uasm_i_ehb(pp); |
281 | |
282 | /* Base address for loads */ |
283 | UASM_i_LA(pp, GPR_T0, (long)CKSEG0); |
284 | |
285 | /* Start of clear loop */ |
286 | uasm_build_label(pl, *pp, lbl); |
287 | |
288 | /* Perform some loads to fill the FSB */ |
289 | for (i = 0; i < num_loads; i++) |
290 | uasm_i_lw(pp, GPR_ZERO, i * line_size * line_stride, GPR_T0); |
291 | |
292 | /* |
293 | * Invalidate the new D-cache entries so that the cache will need |
294 | * refilling (via the FSB) if the loop is executed again. |
295 | */ |
296 | for (i = 0; i < num_loads; i++) { |
297 | uasm_i_cache(pp, Hit_Invalidate_D, |
298 | i * line_size * line_stride, GPR_T0); |
299 | uasm_i_cache(pp, Hit_Writeback_Inv_SD, |
300 | i * line_size * line_stride, GPR_T0); |
301 | } |
302 | |
303 | /* Barrier ensuring previous cache invalidates are complete */ |
304 | uasm_i_sync(pp, __SYNC_full); |
305 | uasm_i_ehb(pp); |
306 | |
307 | /* Check whether the pipeline stalled due to the FSB being full */ |
308 | uasm_i_mfc0(pp, GPR_T1, 25, (perf_counter * 2) + 1); /* PerfCntN */ |
309 | |
310 | /* Loop if it didn't */ |
311 | uasm_il_beqz(pp, pr, GPR_T1, lbl); |
312 | uasm_i_nop(pp); |
313 | |
314 | /* Restore perf counter 1. The count may well now be wrong... */ |
315 | uasm_i_mtc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ |
316 | uasm_i_ehb(pp); |
317 | uasm_i_mtc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ |
318 | uasm_i_ehb(pp); |
319 | |
320 | return 0; |
321 | } |
322 | |
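/*
 * Generate an LL/SC sequence setting the top bit of the value at r_addr (the
 * non-coherent ready_count mapping). Coupled VPEs polling at lbl_poll_cont
 * treat a negative ready_count as the signal that they may continue.
 */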
323 | static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, |
324 | struct uasm_reloc **pr, |
325 | unsigned r_addr, int lbl) |
326 | { |
327 | uasm_i_lui(pp, GPR_T0, uasm_rel_hi(0x80000000)); |
328 | uasm_build_label(pl, *pp, lbl); |
329 | uasm_i_ll(pp, GPR_T1, 0, r_addr); |
330 | uasm_i_or(pp, GPR_T1, GPR_T1, GPR_T0); |
331 | uasm_i_sc(pp, GPR_T1, 0, r_addr); |
332 | uasm_il_beqz(pp, pr, GPR_T1, lbl); |
333 | uasm_i_nop(pp); |
334 | } |
335 | |
336 | static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) |
337 | { |
338 | struct uasm_label *l = labels; |
339 | struct uasm_reloc *r = relocs; |
340 | u32 *buf, *p; |
	const unsigned r_online = GPR_A0;	/* 'online' argument */
	const unsigned r_nc_count = GPR_A1;	/* 'nc_ready_count' argument */
343 | const unsigned r_pcohctl = GPR_T8; |
344 | const unsigned max_instrs = 256; |
345 | unsigned cpc_cmd; |
346 | int err; |
347 | enum { |
348 | lbl_incready = 1, |
349 | lbl_poll_cont, |
350 | lbl_secondary_hang, |
351 | lbl_disable_coherence, |
352 | lbl_flush_fsb, |
353 | lbl_invicache, |
354 | lbl_flushdcache, |
355 | lbl_hang, |
356 | lbl_set_cont, |
357 | lbl_secondary_cont, |
358 | lbl_decready, |
359 | }; |
360 | |
361 | /* Allocate a buffer to hold the generated code */ |
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
363 | if (!buf) |
364 | return NULL; |
365 | |
366 | /* Clear labels & relocs ready for (re)use */ |
367 | memset(labels, 0, sizeof(labels)); |
368 | memset(relocs, 0, sizeof(relocs)); |
369 | |
370 | if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { |
371 | /* Power gating relies upon CPS SMP */ |
372 | if (!mips_cps_smp_in_use()) |
373 | goto out_err; |
374 | |
375 | /* |
376 | * Save CPU state. Note the non-standard calling convention |
377 | * with the return address placed in v0 to avoid clobbering |
378 | * the ra register before it is saved. |
379 | */ |
380 | UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save); |
381 | uasm_i_jalr(&p, GPR_V0, GPR_T0); |
382 | uasm_i_nop(&p); |
383 | } |
384 | |
385 | /* |
386 | * Load addresses of required CM & CPC registers. This is done early |
387 | * because they're needed in both the enable & disable coherence steps |
388 | * but in the coupled case the enable step will only run on one VPE. |
389 | */ |
390 | UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence()); |
391 | |
392 | if (coupled_coherence) { |
393 | /* Increment ready_count */ |
394 | uasm_i_sync(&p, __SYNC_mb); |
395 | uasm_build_label(&l, p, lbl_incready); |
396 | uasm_i_ll(&p, GPR_T1, 0, r_nc_count); |
397 | uasm_i_addiu(&p, GPR_T2, GPR_T1, 1); |
398 | uasm_i_sc(&p, GPR_T2, 0, r_nc_count); |
399 | uasm_il_beqz(&p, &r, GPR_T2, lbl_incready); |
400 | uasm_i_addiu(&p, GPR_T1, GPR_T1, 1); |
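		/* The delay slot addiu leaves the incremented count in t1 */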
401 | |
402 | /* Barrier ensuring all CPUs see the updated r_nc_count value */ |
403 | uasm_i_sync(&p, __SYNC_mb); |
404 | |
405 | /* |
406 | * If this is the last VPE to become ready for non-coherence |
407 | * then it should branch below. |
408 | */ |
409 | uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence); |
410 | uasm_i_nop(&p); |
411 | |
412 | if (state < CPS_PM_POWER_GATED) { |
413 | /* |
414 | * Otherwise this is not the last VPE to become ready |
415 | * for non-coherence. It needs to wait until coherence |
416 | * has been disabled before proceeding, which it will do |
417 | * by polling for the top bit of ready_count being set. |
418 | */ |
419 | uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1); |
420 | uasm_build_label(&l, p, lbl_poll_cont); |
421 | uasm_i_lw(&p, GPR_T0, 0, r_nc_count); |
422 | uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont); |
423 | uasm_i_ehb(&p); |
424 | if (cpu_has_mipsmt) |
425 | uasm_i_yield(&p, GPR_ZERO, GPR_T1); |
426 | uasm_il_b(&p, &r, lbl_poll_cont); |
427 | uasm_i_nop(&p); |
428 | } else { |
429 | /* |
430 | * The core will lose power & this VPE will not continue |
431 | * so it can simply halt here. |
432 | */ |
433 | if (cpu_has_mipsmt) { |
434 | /* Halt the VPE via C0 tchalt register */ |
435 | uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H); |
436 | uasm_i_mtc0(&p, GPR_T0, 2, 4); |
437 | } else if (cpu_has_vp) { |
438 | /* Halt the VP via the CPC VP_STOP register */ |
439 | unsigned int vpe_id; |
440 | |
441 | vpe_id = cpu_vpe_id(&cpu_data[cpu]); |
442 | uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id); |
443 | UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop()); |
444 | uasm_i_sw(&p, GPR_T0, 0, GPR_T1); |
445 | } else { |
446 | BUG(); |
447 | } |
448 | uasm_build_label(&l, p, lbl_secondary_hang); |
449 | uasm_il_b(&p, &r, lbl_secondary_hang); |
450 | uasm_i_nop(&p); |
451 | } |
452 | } |
453 | |
454 | /* |
455 | * This is the point of no return - this VPE will now proceed to |
456 | * disable coherence. At this point we *must* be sure that no other |
457 | * VPE within the core will interfere with the L1 dcache. |
458 | */ |
459 | uasm_build_label(&l, p, lbl_disable_coherence); |
460 | |
461 | /* Invalidate the L1 icache */ |
462 | cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, |
463 | Index_Invalidate_I, lbl_invicache); |
464 | |
465 | /* Writeback & invalidate the L1 dcache */ |
466 | cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, |
467 | Index_Writeback_Inv_D, lbl_flushdcache); |
468 | |
469 | /* Barrier ensuring previous cache invalidates are complete */ |
470 | uasm_i_sync(&p, __SYNC_full); |
471 | uasm_i_ehb(&p); |
472 | |
473 | if (mips_cm_revision() < CM_REV_CM3) { |
474 | /* |
475 | * Disable all but self interventions. The load from COHCTL is |
476 | * defined by the interAptiv & proAptiv SUMs as ensuring that the |
477 | * operation resulting from the preceding store is complete. |
478 | */ |
479 | uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu])); |
480 | uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); |
481 | uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); |
482 | |
483 | /* Barrier to ensure write to coherence control is complete */ |
484 | uasm_i_sync(&p, __SYNC_full); |
485 | uasm_i_ehb(&p); |
486 | } |
487 | |
488 | /* Disable coherence */ |
489 | uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl); |
490 | uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); |
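	/* Reading COHCTL back ensures the disabling store has taken effect */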
491 | |
492 | if (state >= CPS_PM_CLOCK_GATED) { |
493 | err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], |
494 | lbl_flush_fsb); |
495 | if (err) |
496 | goto out_err; |
497 | |
498 | /* Determine the CPC command to issue */ |
499 | switch (state) { |
500 | case CPS_PM_CLOCK_GATED: |
501 | cpc_cmd = CPC_Cx_CMD_CLOCKOFF; |
502 | break; |
503 | case CPS_PM_POWER_GATED: |
504 | cpc_cmd = CPC_Cx_CMD_PWRDOWN; |
505 | break; |
506 | default: |
507 | BUG(); |
508 | goto out_err; |
509 | } |
510 | |
511 | /* Issue the CPC command */ |
512 | UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd()); |
513 | uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd); |
514 | uasm_i_sw(&p, GPR_T1, 0, GPR_T0); |
515 | |
516 | if (state == CPS_PM_POWER_GATED) { |
517 | /* If anything goes wrong just hang */ |
518 | uasm_build_label(&l, p, lbl_hang); |
519 | uasm_il_b(&p, &r, lbl_hang); |
520 | uasm_i_nop(&p); |
521 | |
522 | /* |
523 | * There's no point generating more code, the core is |
524 | * powered down & if powered back up will run from the |
525 | * reset vector not from here. |
526 | */ |
527 | goto gen_done; |
528 | } |
529 | |
530 | /* Barrier to ensure write to CPC command is complete */ |
531 | uasm_i_sync(&p, __SYNC_full); |
532 | uasm_i_ehb(&p); |
533 | } |
534 | |
535 | if (state == CPS_PM_NC_WAIT) { |
536 | /* |
537 | * At this point it is safe for all VPEs to proceed with |
538 | * execution. This VPE will set the top bit of ready_count |
539 | * to indicate to the other VPEs that they may continue. |
540 | */ |
541 | if (coupled_coherence) |
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);
544 | |
545 | /* |
546 | * VPEs which did not disable coherence will continue |
547 | * executing, after coherence has been disabled, from this |
548 | * point. |
549 | */ |
550 | uasm_build_label(&l, p, lbl_secondary_cont); |
551 | |
552 | /* Now perform our wait */ |
553 | uasm_i_wait(&p, 0); |
554 | } |
555 | |
556 | /* |
557 | * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs |
558 | * will run this. The first will actually re-enable coherence & the |
559 | * rest will just be performing a rather unusual nop. |
560 | */ |
561 | uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3 |
562 | ? CM_GCR_Cx_COHERENCE_COHDOMAINEN |
563 | : CM3_GCR_Cx_COHERENCE_COHEN); |
564 | |
565 | uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); |
566 | uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); |
567 | |
568 | /* Barrier to ensure write to coherence control is complete */ |
569 | uasm_i_sync(&p, __SYNC_full); |
570 | uasm_i_ehb(&p); |
571 | |
572 | if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { |
573 | /* Decrement ready_count */ |
574 | uasm_build_label(&l, p, lbl_decready); |
575 | uasm_i_sync(&p, __SYNC_mb); |
576 | uasm_i_ll(&p, GPR_T1, 0, r_nc_count); |
577 | uasm_i_addiu(&p, GPR_T2, GPR_T1, -1); |
578 | uasm_i_sc(&p, GPR_T2, 0, r_nc_count); |
579 | uasm_il_beqz(&p, &r, GPR_T2, lbl_decready); |
580 | uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1); |
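		/*
		 * Delay slot: v0 = the pre-decrement count with the top bit
		 * masked off, i.e. the value returned to cps_pm_enter_state().
		 */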
581 | |
582 | /* Barrier ensuring all CPUs see the updated r_nc_count value */ |
583 | uasm_i_sync(&p, __SYNC_mb); |
584 | } |
585 | |
586 | if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { |
587 | /* |
588 | * At this point it is safe for all VPEs to proceed with |
589 | * execution. This VPE will set the top bit of ready_count |
590 | * to indicate to the other VPEs that they may continue. |
591 | */ |
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
593 | |
594 | /* |
595 | * This core will be reliant upon another core sending a |
596 | * power-up command to the CPC in order to resume operation. |
597 | * Thus an arbitrary VPE can't trigger the core leaving the |
598 | * idle state and the one that disables coherence might as well |
599 | * be the one to re-enable it. The rest will continue from here |
600 | * after that has been done. |
601 | */ |
602 | uasm_build_label(&l, p, lbl_secondary_cont); |
603 | |
604 | /* Barrier ensuring all CPUs see the updated r_nc_count value */ |
605 | uasm_i_sync(&p, __SYNC_mb); |
606 | } |
607 | |
608 | /* The core is coherent, time to return to C code */ |
609 | uasm_i_jr(&p, GPR_RA); |
610 | uasm_i_nop(&p); |
611 | |
612 | gen_done: |
613 | /* Ensure the code didn't exceed the resources allocated for it */ |
614 | BUG_ON((p - buf) > max_instrs); |
615 | BUG_ON((l - labels) > ARRAY_SIZE(labels)); |
616 | BUG_ON((r - relocs) > ARRAY_SIZE(relocs)); |
617 | |
618 | /* Patch branch offsets */ |
619 | uasm_resolve_relocs(relocs, labels); |
620 | |
621 | /* Flush the icache */ |
622 | local_flush_icache_range((unsigned long)buf, (unsigned long)p); |
623 | |
624 | return buf; |
625 | out_err: |
	kfree(buf);
627 | return NULL; |
628 | } |
629 | |
630 | static int cps_pm_online_cpu(unsigned int cpu) |
631 | { |
632 | enum cps_pm_state state; |
633 | unsigned core = cpu_core(&cpu_data[cpu]); |
634 | void *entry_fn, *core_rc; |
635 | |
636 | for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { |
637 | if (per_cpu(nc_asm_enter, core)[state]) |
638 | continue; |
639 | if (!test_bit(state, state_support)) |
640 | continue; |
641 | |
642 | entry_fn = cps_gen_entry_code(cpu, state); |
643 | if (!entry_fn) { |
			pr_err("Failed to generate core %u state %u entry\n",
645 | core, state); |
646 | clear_bit(state, state_support); |
647 | } |
648 | |
649 | per_cpu(nc_asm_enter, core)[state] = entry_fn; |
650 | } |
651 | |
652 | if (!per_cpu(ready_count, core)) { |
		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
654 | if (!core_rc) { |
			pr_err("Failed to allocate core %u ready_count\n", core);
656 | return -ENOMEM; |
657 | } |
658 | per_cpu(ready_count, core) = core_rc; |
659 | } |
660 | |
661 | return 0; |
662 | } |
663 | |
664 | static int cps_pm_power_notifier(struct notifier_block *this, |
665 | unsigned long event, void *ptr) |
666 | { |
667 | unsigned int stat; |
668 | |
669 | switch (event) { |
670 | case PM_SUSPEND_PREPARE: |
671 | stat = read_cpc_cl_stat_conf(); |
672 | /* |
673 | * If we're attempting to suspend the system and power down all |
674 | * of the cores, the JTAG detect bit indicates that the CPC will |
675 | * instead put the cores into clock-off state. In this state |
676 | * a connected debugger can cause the CPU to attempt |
677 | * interactions with the powered down system. At best this will |
678 | * fail. At worst, it can hang the NoC, requiring a hard reset. |
679 | * To avoid this, just block system suspend if a JTAG probe |
680 | * is detected. |
681 | */ |
682 | if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) { |
			pr_warn("JTAG probe is connected - abort suspend\n");
684 | return NOTIFY_BAD; |
685 | } |
686 | return NOTIFY_DONE; |
687 | default: |
688 | return NOTIFY_DONE; |
689 | } |
690 | } |
691 | |
692 | static int __init cps_pm_init(void) |
693 | { |
694 | /* A CM is required for all non-coherent states */ |
695 | if (!mips_cm_present()) { |
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
697 | return 0; |
698 | } |
699 | |
700 | /* |
701 | * If interrupts were enabled whilst running a wait instruction on a |
702 | * non-coherent core then the VPE may end up processing interrupts |
703 | * whilst non-coherent. That would be bad. |
704 | */ |
705 | if (cpu_wait == r4k_wait_irqoff) |
706 | set_bit(CPS_PM_NC_WAIT, state_support); |
707 | else |
		pr_warn("pm-cps: non-coherent wait unavailable\n");
709 | |
710 | /* Detect whether a CPC is present */ |
711 | if (mips_cpc_present()) { |
712 | /* Detect whether clock gating is implemented */ |
713 | if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL) |
714 | set_bit(CPS_PM_CLOCK_GATED, state_support); |
715 | else |
			pr_warn("pm-cps: CPC does not support clock gating\n");
717 | |
718 | /* Power gating is available with CPS SMP & any CPC */ |
719 | if (mips_cps_smp_in_use()) |
720 | set_bit(CPS_PM_POWER_GATED, state_support); |
721 | else |
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
723 | } else { |
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
725 | } |
726 | |
727 | pm_notifier(cps_pm_power_notifier, 0); |
728 | |
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
				 cps_pm_online_cpu, NULL);
731 | } |
732 | arch_initcall(cps_pm_init); |
733 | |