/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level suspend code for AM33XX SoCs
 *
 * Copyright (C) 2012-2018 Texas Instruments Incorporated - https://www.ti.com/
 *	Dave Gerlach, Vaibhav Bedia
 */

#include <linux/linkage.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/page.h>

#include "iomap.h"
#include "cm33xx.h"
#include "pm-asm-offsets.h"

#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED	0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE	0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE	0x0002

/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr)			(1 << (nr))
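/* e.g. BIT(2) == (1 << 2), the SCTLR.C bit position manipulated below */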

	.arm
	.arch armv7-a
	.align 3

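/*
 * am33xx_do_wfi is copied to and executed from SRAM. On entry, r0
 * carries the wfi_flags word (WFI_FLAG_* bits). If the WFI is aborted
 * by a late interrupt it returns 1 in r0; the deep-sleep resume path
 * below instead passes 0 to cpu_resume.
 */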
ENTRY(am33xx_do_wfi)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/* Save wfi_flags arg to data space */
	mov	r4, r0
	adr	r3, am33xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	str	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
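	/*
	 * The flags are parked in the SRAM data area, not just in r4,
	 * because v7_flush_dcache_all (called via kernel_flush below)
	 * clobbers r0-r7 and r9-r11; r4 is reloaded from here once the
	 * flushes are done.
	 */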

	/* Only flush the cache if we know we are losing MPU context */
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_flush

	/*
	 * Flush all data from the L1 and L2 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	blx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C makes all data accesses
	 * strongly ordered, so they no longer hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate L1 and L2 data cache.
	 */
	ldr	r1, kernel_flush
	blx	r1
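	/*
	 * This second flush pass is deliberate: it catches any lines
	 * that were allocated between the first flush and the clearing
	 * of SCTLR.C above.
	 */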

	adr	r3, am33xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	ldr	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

cache_skip_flush:
	/* Check if we want self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_enter_sr

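	/*
	 * am33xx_emif_sram_table is a table of EMIF PM function entry
	 * points copied to SRAM by the ti-emif-sram code; each
	 * EMIF_PM_*_OFFSET below indexes one function within it.
	 */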
	adr	r9, am33xx_emif_sram_table

	ldr	r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
	blx	r3

emif_skip_enter_sr:
	/* Only necessary if PER is losing context */
	tst	r4, #WFI_FLAG_SAVE_EMIF
	beq	emif_skip_save

	ldr	r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
	blx	r3

emif_skip_save:
	/* We can only disable the EMIF if we have entered self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_disable

	/* Disable EMIF */
	ldr	r1, virt_emif_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]
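	/*
	 * MODULEMODE occupies the low two bits of CLKCTRL, so the bic
	 * with the 0x3 mask writes MODULEMODE = 0x0 (module disabled),
	 * e.g. 0x00000002 (ENABLE) & ~0x3 -> 0x00000000.
	 */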

	ldr	r1, virt_emif_clkctrl
wait_emif_disable:
	ldr	r2, [r1]
	mov	r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
	cmp	r2, r3
	bne	wait_emif_disable
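	/*
	 * The loop above spins until IDLEST (bits 17:16) reads 0x3,
	 * i.e. the register matches MODULESTATE_DISABLED (0x00030000).
	 */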

emif_skip_disable:
	tst	r4, #WFI_FLAG_WAKE_M3
	beq	wkup_m3_skip

	/*
	 * For the MPU WFI to be registered as an interrupt
	 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
	 * to DISABLED
	 */
	ldr	r1, virt_mpu_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]
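	/* As with the EMIF above, the bic clears MODULEMODE to 0x0 */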

wkup_m3_skip:
	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can speculatively
	 * prefetch the instructions so add NOPs after WFI. Thirteen
	 * NOPs as per Cortex-A8 pipeline.
	 */
	wfi

	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* We come here in case of an abort due to a late interrupt */

	/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
	ldr	r1, virt_mpu_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]

	/* Re-enable EMIF */
	ldr	r1, virt_emif_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
wait_emif_enable:
	ldr	r3, [r1]
	cmp	r2, r3
	bne	wait_emif_enable
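	/*
	 * The readback matches r2 only once the IDLEST bits have
	 * cleared, i.e. the register reads back exactly ENABLE (0x2)
	 * and the module is functional again.
	 */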

	/* Only necessary if we have entered self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_exit_sr_abt

	adr	r9, am33xx_emif_sram_table
	ldr	r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
	blx	r1

emif_skip_exit_sr_abt:
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_restore

	/*
	 * Set SCTLR.C bit to allow data cache allocation
	 */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #(1 << 2)	@ Enable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

cache_skip_restore:
	/* Let the suspend code know about the abort */
	mov	r0, #1
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(am33xx_do_wfi)

	.align
ENTRY(am33xx_resume_offset)
	.word . - am33xx_do_wfi
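/*
 * Offset of the deep-sleep resume code relative to am33xx_do_wfi;
 * the pm33xx driver uses it to compute the physical address of the
 * resume vector within the SRAM copy of this code.
 */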

ENTRY(am33xx_resume_from_deep_sleep)
	/* Re-enable EMIF */
	ldr	r0, phys_emif_clkctrl
	mov	r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r1, [r0]
wait_emif_enable1:
	ldr	r2, [r0]
	cmp	r1, r2
	bne	wait_emif_enable1

	adr	r9, am33xx_emif_sram_table

	ldr	r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
	blx	r1

	ldr	r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
	blx	r1

resume_to_ddr:
	/* We are back. Branch to the common CPU resume routine */
	mov	r0, #0
	ldr	pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)

/*
 * Local variables
 */
	.align
kernel_flush:
	.word v7_flush_dcache_all
virt_mpu_clkctrl:
	.word AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
	.word AM33XX_CM_PER_EMIF_CLKCTRL
phys_emif_clkctrl:
	.word (AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
	       AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)

	.align 3
/* DDR related defines */
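/*
 * Reserved space for the EMIF PM function table; it is populated at
 * runtime when the ti-emif-sram functions are copied alongside this
 * code, so the EMIF_PM_*_OFFSET loads above resolve to live code.
 */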
am33xx_emif_sram_table:
	.space EMIF_PM_FUNCTIONS_SIZE

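/*
 * Addresses the pm33xx suspend core needs in order to copy this code
 * to SRAM and locate the tables above (matching the layout of
 * struct am33xx_pm_sram_addr in linux/platform_data/pm33xx.h).
 */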
ENTRY(am33xx_pm_sram)
	.word am33xx_do_wfi
	.word am33xx_do_wfi_sz
	.word am33xx_resume_offset
	.word am33xx_emif_sram_table
	.word am33xx_pm_ro_sram_data

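/*
 * Physical address of the generic cpu_resume routine: the MMU is still
 * off when we branch here, so the kernel virtual address is translated
 * to its physical alias in DDR at 0x80000000.
 */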
resume_addr:
	.word cpu_resume - PAGE_OFFSET + 0x80000000

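/*
 * Read-only data area shared with the pm33xx core; its layout must
 * match the AMX3_PM_*_OFFSET constants generated in pm-asm-offsets.h.
 */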
	.align 3
ENTRY(am33xx_pm_ro_sram_data)
	.space AMX3_PM_RO_SRAM_DATA_SIZE

ENTRY(am33xx_do_wfi_sz)
	.word . - am33xx_do_wfi