1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * OMAP MPUSS low power code |
4 | * |
5 | * Copyright (C) 2011 Texas Instruments, Inc. |
6 | * Santosh Shilimkar <santosh.shilimkar@ti.com> |
7 | * |
8 | * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU |
9 | * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller, |
10 | * CPU0 and CPU1 LPRM modules. |
 * CPU0, CPU1 and MPUSS each have their own power domain and
12 | * hence multiple low power combinations of MPUSS are possible. |
13 | * |
14 | * The CPU0 and CPU1 can't support Closed switch Retention (CSWR) |
15 | * because the mode is not supported by hw constraints of dormant |
16 | * mode. While waking up from the dormant mode, a reset signal |
17 | * to the Cortex-A9 processor must be asserted by the external |
18 | * power controller. |
19 | * |
20 | * With architectural inputs and hardware recommendations, only |
21 | * below modes are supported from power gain vs latency point of view. |
22 | * |
23 | * CPU0 CPU1 MPUSS |
24 | * ---------------------------------------------- |
25 | * ON ON ON |
26 | * ON(Inactive) OFF ON(Inactive) |
27 | * OFF OFF CSWR |
28 | * OFF OFF OSWR |
29 | * OFF OFF OFF(Device OFF *TBD) |
30 | * ---------------------------------------------- |
31 | * |
32 | * Note: CPU0 is the master core and it is the last CPU to go down |
 * and first to wake-up when MPUSS low power states are exercised
34 | */ |
35 | |
36 | #include <linux/cpuidle.h> |
37 | #include <linux/kernel.h> |
38 | #include <linux/io.h> |
39 | #include <linux/errno.h> |
40 | #include <linux/linkage.h> |
41 | #include <linux/smp.h> |
42 | |
43 | #include <asm/cacheflush.h> |
44 | #include <asm/tlbflush.h> |
45 | #include <asm/smp_scu.h> |
46 | #include <asm/suspend.h> |
47 | #include <asm/virt.h> |
48 | #include <asm/hardware/cache-l2x0.h> |
49 | |
50 | #include "soc.h" |
51 | #include "common.h" |
52 | #include "omap44xx.h" |
53 | #include "omap4-sar-layout.h" |
54 | #include "pm.h" |
55 | #include "prcm_mpu44xx.h" |
56 | #include "prcm_mpu54xx.h" |
57 | #include "prminst44xx.h" |
58 | #include "prcm44xx.h" |
59 | #include "prm44xx.h" |
60 | #include "prm-regbits-44xx.h" |
61 | |
62 | static void __iomem *sar_base; |
63 | static u32 old_cpu1_ns_pa_addr; |
64 | |
65 | #if defined(CONFIG_PM) && defined(CONFIG_SMP) |
66 | |
67 | struct omap4_cpu_pm_info { |
68 | struct powerdomain *pwrdm; |
69 | void __iomem *scu_sar_addr; |
70 | void __iomem *wkup_sar_addr; |
71 | void __iomem *l2x0_sar_addr; |
72 | }; |
73 | |
74 | /** |
75 | * struct cpu_pm_ops - CPU pm operations |
76 | * @finish_suspend: CPU suspend finisher function pointer |
77 | * @resume: CPU resume function pointer |
78 | * @scu_prepare: CPU Snoop Control program function pointer |
79 | * @hotplug_restart: CPU restart function pointer |
80 | * |
81 | * Structure holds functions pointer for CPU low power operations like |
82 | * suspend, resume and scu programming. |
83 | */ |
84 | struct cpu_pm_ops { |
85 | int (*finish_suspend)(unsigned long cpu_state); |
86 | void (*resume)(void); |
87 | void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state); |
88 | void (*hotplug_restart)(void); |
89 | }; |
90 | |
91 | static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); |
92 | static struct powerdomain *mpuss_pd; |
93 | static u32 cpu_context_offset; |
94 | |
95 | static int default_finish_suspend(unsigned long cpu_state) |
96 | { |
97 | omap_do_wfi(); |
98 | return 0; |
99 | } |
100 | |
101 | static void dummy_cpu_resume(void) |
102 | {} |
103 | |
104 | static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state) |
105 | {} |
106 | |
107 | static struct cpu_pm_ops omap_pm_ops = { |
108 | .finish_suspend = default_finish_suspend, |
109 | .resume = dummy_cpu_resume, |
110 | .scu_prepare = dummy_scu_prepare, |
111 | .hotplug_restart = dummy_cpu_resume, |
112 | }; |
113 | |
114 | /* |
115 | * Program the wakeup routine address for the CPU0 and CPU1 |
116 | * used for OFF or DORMANT wakeup. |
117 | */ |
118 | static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr) |
119 | { |
120 | struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); |
121 | |
122 | if (pm_info->wkup_sar_addr) |
123 | writel_relaxed(addr, pm_info->wkup_sar_addr); |
124 | } |
125 | |
126 | /* |
127 | * Store the SCU power status value to scratchpad memory |
128 | */ |
129 | static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) |
130 | { |
131 | struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); |
132 | u32 scu_pwr_st; |
133 | |
134 | switch (cpu_state) { |
135 | case PWRDM_POWER_RET: |
136 | scu_pwr_st = SCU_PM_DORMANT; |
137 | break; |
138 | case PWRDM_POWER_OFF: |
139 | scu_pwr_st = SCU_PM_POWEROFF; |
140 | break; |
141 | case PWRDM_POWER_ON: |
142 | case PWRDM_POWER_INACTIVE: |
143 | default: |
144 | scu_pwr_st = SCU_PM_NORMAL; |
145 | break; |
146 | } |
147 | |
148 | if (pm_info->scu_sar_addr) |
149 | writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr); |
150 | } |
151 | |
152 | /* Helper functions for MPUSS OSWR */ |
153 | static inline void mpuss_clear_prev_logic_pwrst(void) |
154 | { |
155 | u32 reg; |
156 | |
157 | reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, |
158 | OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); |
159 | omap4_prminst_write_inst_reg(val: reg, OMAP4430_PRM_PARTITION, |
160 | OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); |
161 | } |
162 | |
163 | static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id) |
164 | { |
165 | u32 reg; |
166 | |
167 | if (cpu_id) { |
168 | reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST, |
169 | idx: cpu_context_offset); |
170 | omap4_prcm_mpu_write_inst_reg(val: reg, OMAP4430_PRCM_MPU_CPU1_INST, |
171 | idx: cpu_context_offset); |
172 | } else { |
173 | reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST, |
174 | idx: cpu_context_offset); |
175 | omap4_prcm_mpu_write_inst_reg(val: reg, OMAP4430_PRCM_MPU_CPU0_INST, |
176 | idx: cpu_context_offset); |
177 | } |
178 | } |
179 | |
180 | /* |
181 | * Store the CPU cluster state for L2X0 low power operations. |
182 | */ |
183 | static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state) |
184 | { |
185 | struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); |
186 | |
187 | if (pm_info->l2x0_sar_addr) |
188 | writel_relaxed(save_state, pm_info->l2x0_sar_addr); |
189 | } |
190 | |
191 | /* |
192 | * Save the L2X0 AUXCTRL and POR value to SAR memory. Its used to |
193 | * in every restore MPUSS OFF path. |
194 | */ |
195 | #ifdef CONFIG_CACHE_L2X0 |
196 | static void __init save_l2x0_context(void) |
197 | { |
198 | void __iomem *l2x0_base = omap4_get_l2cache_base(); |
199 | |
200 | if (l2x0_base && sar_base) { |
201 | writel_relaxed(l2x0_saved_regs.aux_ctrl, |
202 | sar_base + L2X0_AUXCTRL_OFFSET); |
203 | writel_relaxed(l2x0_saved_regs.prefetch_ctrl, |
204 | sar_base + L2X0_PREFETCH_CTRL_OFFSET); |
205 | } |
206 | } |
207 | #else |
208 | static void __init save_l2x0_context(void) |
209 | {} |
210 | #endif |
211 | |
212 | /** |
213 | * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function |
214 | * The purpose of this function is to manage low power programming |
215 | * of OMAP4 MPUSS subsystem |
216 | * @cpu : CPU ID |
217 | * @power_state: Low power state. |
218 | * @rcuidle: RCU needs to be idled |
219 | * |
220 | * MPUSS states for the context save: |
221 | * save_state = |
222 | * 0 - Nothing lost and no need to save: MPUSS INACTIVE |
223 | * 1 - CPUx L1 and logic lost: MPUSS CSWR |
224 | * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR |
225 | * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF |
226 | */ |
227 | __cpuidle int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state, |
228 | bool rcuidle) |
229 | { |
230 | struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); |
231 | unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET; |
232 | |
233 | if (omap_rev() == OMAP4430_REV_ES1_0) |
234 | return -ENXIO; |
235 | |
236 | switch (power_state) { |
237 | case PWRDM_POWER_ON: |
238 | case PWRDM_POWER_INACTIVE: |
239 | save_state = 0; |
240 | break; |
241 | case PWRDM_POWER_OFF: |
242 | cpu_logic_state = PWRDM_POWER_OFF; |
243 | save_state = 1; |
244 | break; |
245 | case PWRDM_POWER_RET: |
246 | if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE)) |
247 | save_state = 0; |
248 | break; |
249 | default: |
250 | /* |
251 | * CPUx CSWR is invalid hardware state. Also CPUx OSWR |
252 | * doesn't make much scense, since logic is lost and $L1 |
253 | * needs to be cleaned because of coherency. This makes |
254 | * CPUx OSWR equivalent to CPUX OFF and hence not supported |
255 | */ |
256 | WARN_ON(1); |
257 | return -ENXIO; |
258 | } |
259 | |
260 | pwrdm_pre_transition(NULL); |
261 | |
262 | /* |
263 | * Check MPUSS next state and save interrupt controller if needed. |
264 | * In MPUSS OSWR or device OFF, interrupt controller contest is lost. |
265 | */ |
266 | mpuss_clear_prev_logic_pwrst(); |
267 | if ((pwrdm_read_next_pwrst(pwrdm: mpuss_pd) == PWRDM_POWER_RET) && |
268 | (pwrdm_read_logic_retst(pwrdm: mpuss_pd) == PWRDM_POWER_OFF)) |
269 | save_state = 2; |
270 | |
271 | cpu_clear_prev_logic_pwrst(cpu_id: cpu); |
272 | pwrdm_set_next_pwrst(pwrdm: pm_info->pwrdm, pwrst: power_state); |
273 | pwrdm_set_logic_retst(pwrdm: pm_info->pwrdm, pwrst: cpu_logic_state); |
274 | |
275 | if (rcuidle) |
276 | ct_cpuidle_enter(); |
277 | |
278 | set_cpu_wakeup_addr(cpu_id: cpu, __pa_symbol(omap_pm_ops.resume)); |
279 | omap_pm_ops.scu_prepare(cpu, power_state); |
280 | l2x0_pwrst_prepare(cpu_id: cpu, save_state); |
281 | |
282 | /* |
283 | * Call low level function with targeted low power state. |
284 | */ |
285 | if (save_state) |
286 | cpu_suspend(save_state, omap_pm_ops.finish_suspend); |
287 | else |
288 | omap_pm_ops.finish_suspend(save_state); |
289 | |
290 | if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu) |
291 | gic_dist_enable(); |
292 | |
293 | if (rcuidle) |
294 | ct_cpuidle_exit(); |
295 | |
296 | /* |
297 | * Restore the CPUx power state to ON otherwise CPUx |
298 | * power domain can transitions to programmed low power |
299 | * state while doing WFI outside the low powe code. On |
300 | * secure devices, CPUx does WFI which can result in |
301 | * domain transition |
302 | */ |
303 | pwrdm_set_next_pwrst(pwrdm: pm_info->pwrdm, PWRDM_POWER_ON); |
304 | |
305 | pwrdm_post_transition(NULL); |
306 | |
307 | return 0; |
308 | } |
309 | |
310 | /** |
311 | * omap4_hotplug_cpu: OMAP4 CPU hotplug entry |
312 | * @cpu : CPU ID |
313 | * @power_state: CPU low power state. |
314 | */ |
315 | int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) |
316 | { |
317 | struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); |
318 | unsigned int cpu_state = 0; |
319 | |
320 | if (omap_rev() == OMAP4430_REV_ES1_0) |
321 | return -ENXIO; |
322 | |
323 | /* Use the achievable power state for the domain */ |
324 | power_state = pwrdm_get_valid_lp_state(pwrdm: pm_info->pwrdm, |
325 | is_logic_state: false, req_state: power_state); |
326 | |
327 | if (power_state == PWRDM_POWER_OFF) |
328 | cpu_state = 1; |
329 | |
330 | pwrdm_clear_all_prev_pwrst(pwrdm: pm_info->pwrdm); |
331 | pwrdm_set_next_pwrst(pwrdm: pm_info->pwrdm, pwrst: power_state); |
332 | set_cpu_wakeup_addr(cpu_id: cpu, __pa_symbol(omap_pm_ops.hotplug_restart)); |
333 | omap_pm_ops.scu_prepare(cpu, power_state); |
334 | |
335 | /* |
336 | * CPU never retuns back if targeted power state is OFF mode. |
337 | * CPU ONLINE follows normal CPU ONLINE ptah via |
338 | * omap4_secondary_startup(). |
339 | */ |
340 | omap_pm_ops.finish_suspend(cpu_state); |
341 | |
342 | pwrdm_set_next_pwrst(pwrdm: pm_info->pwrdm, PWRDM_POWER_ON); |
343 | return 0; |
344 | } |
345 | |
346 | |
347 | /* |
348 | * Enable Mercury Fast HG retention mode by default. |
349 | */ |
350 | static void enable_mercury_retention_mode(void) |
351 | { |
352 | u32 reg; |
353 | |
354 | reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST, |
355 | OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET); |
356 | /* Enable HG_EN, HG_RAMPUP = fast mode */ |
357 | reg |= BIT(24) | BIT(25); |
358 | omap4_prcm_mpu_write_inst_reg(val: reg, OMAP54XX_PRCM_MPU_DEVICE_INST, |
359 | OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET); |
360 | } |
361 | |
362 | /* |
363 | * Initialise OMAP4 MPUSS |
364 | */ |
365 | int __init omap4_mpuss_init(void) |
366 | { |
367 | struct omap4_cpu_pm_info *pm_info; |
368 | |
369 | if (omap_rev() == OMAP4430_REV_ES1_0) { |
370 | WARN(1, "Power Management not supported on OMAP4430 ES1.0\n" ); |
371 | return -ENODEV; |
372 | } |
373 | |
374 | /* Initilaise per CPU PM information */ |
375 | pm_info = &per_cpu(omap4_pm_info, 0x0); |
376 | if (sar_base) { |
377 | pm_info->scu_sar_addr = sar_base + SCU_OFFSET0; |
378 | if (cpu_is_omap44xx()) |
379 | pm_info->wkup_sar_addr = sar_base + |
380 | CPU0_WAKEUP_NS_PA_ADDR_OFFSET; |
381 | else |
382 | pm_info->wkup_sar_addr = sar_base + |
383 | OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET; |
384 | pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0; |
385 | } |
386 | pm_info->pwrdm = pwrdm_lookup(name: "cpu0_pwrdm" ); |
387 | if (!pm_info->pwrdm) { |
388 | pr_err("Lookup failed for CPU0 pwrdm\n" ); |
389 | return -ENODEV; |
390 | } |
391 | |
392 | /* Clear CPU previous power domain state */ |
393 | pwrdm_clear_all_prev_pwrst(pwrdm: pm_info->pwrdm); |
394 | cpu_clear_prev_logic_pwrst(cpu_id: 0); |
395 | |
396 | /* Initialise CPU0 power domain state to ON */ |
397 | pwrdm_set_next_pwrst(pwrdm: pm_info->pwrdm, PWRDM_POWER_ON); |
398 | |
399 | pm_info = &per_cpu(omap4_pm_info, 0x1); |
400 | if (sar_base) { |
401 | pm_info->scu_sar_addr = sar_base + SCU_OFFSET1; |
402 | if (cpu_is_omap44xx()) |
403 | pm_info->wkup_sar_addr = sar_base + |
404 | CPU1_WAKEUP_NS_PA_ADDR_OFFSET; |
405 | else |
406 | pm_info->wkup_sar_addr = sar_base + |
407 | OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET; |
408 | pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1; |
409 | } |
410 | |
411 | pm_info->pwrdm = pwrdm_lookup(name: "cpu1_pwrdm" ); |
412 | if (!pm_info->pwrdm) { |
413 | pr_err("Lookup failed for CPU1 pwrdm\n" ); |
414 | return -ENODEV; |
415 | } |
416 | |
417 | /* Clear CPU previous power domain state */ |
418 | pwrdm_clear_all_prev_pwrst(pwrdm: pm_info->pwrdm); |
419 | cpu_clear_prev_logic_pwrst(cpu_id: 1); |
420 | |
421 | /* Initialise CPU1 power domain state to ON */ |
422 | pwrdm_set_next_pwrst(pwrdm: pm_info->pwrdm, PWRDM_POWER_ON); |
423 | |
424 | mpuss_pd = pwrdm_lookup(name: "mpu_pwrdm" ); |
425 | if (!mpuss_pd) { |
426 | pr_err("Failed to lookup MPUSS power domain\n" ); |
427 | return -ENODEV; |
428 | } |
429 | pwrdm_clear_all_prev_pwrst(pwrdm: mpuss_pd); |
430 | mpuss_clear_prev_logic_pwrst(); |
431 | |
432 | if (sar_base) { |
433 | /* Save device type on scratchpad for low level code to use */ |
434 | writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0, |
435 | sar_base + OMAP_TYPE_OFFSET); |
436 | save_l2x0_context(); |
437 | } |
438 | |
439 | if (cpu_is_omap44xx()) { |
440 | omap_pm_ops.finish_suspend = omap4_finish_suspend; |
441 | omap_pm_ops.resume = omap4_cpu_resume; |
442 | omap_pm_ops.scu_prepare = scu_pwrst_prepare; |
443 | omap_pm_ops.hotplug_restart = omap4_secondary_startup; |
444 | cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET; |
445 | } else if (soc_is_omap54xx() || soc_is_dra7xx()) { |
446 | cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET; |
447 | enable_mercury_retention_mode(); |
448 | } |
449 | |
450 | if (cpu_is_omap446x()) |
451 | omap_pm_ops.hotplug_restart = omap4460_secondary_startup; |
452 | |
453 | return 0; |
454 | } |
455 | |
456 | #endif |
457 | |
458 | u32 omap4_get_cpu1_ns_pa_addr(void) |
459 | { |
460 | return old_cpu1_ns_pa_addr; |
461 | } |
462 | |
463 | /* |
464 | * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to |
465 | * current kernel's secondary_startup() early before |
466 | * clockdomains_init(). Otherwise clockdomain_init() can |
467 | * wake CPU1 and cause a hang. |
468 | */ |
469 | void __init omap4_mpuss_early_init(void) |
470 | { |
471 | unsigned long startup_pa; |
472 | void __iomem *ns_pa_addr; |
473 | |
474 | if (!(soc_is_omap44xx() || soc_is_omap54xx())) |
475 | return; |
476 | |
477 | sar_base = omap4_get_sar_ram_base(); |
478 | |
479 | /* Save old NS_PA_ADDR for validity checks later on */ |
480 | if (soc_is_omap44xx()) |
481 | ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET; |
482 | else |
483 | ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET; |
484 | old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr); |
485 | |
486 | if (soc_is_omap443x()) |
487 | startup_pa = __pa_symbol(omap4_secondary_startup); |
488 | else if (soc_is_omap446x()) |
489 | startup_pa = __pa_symbol(omap4460_secondary_startup); |
490 | else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) |
491 | startup_pa = __pa_symbol(omap5_secondary_hyp_startup); |
492 | else |
493 | startup_pa = __pa_symbol(omap5_secondary_startup); |
494 | |
495 | if (soc_is_omap44xx()) |
496 | writel_relaxed(startup_pa, sar_base + |
497 | CPU1_WAKEUP_NS_PA_ADDR_OFFSET); |
498 | else |
499 | writel_relaxed(startup_pa, sar_base + |
500 | OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET); |
501 | } |
502 | |