/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low level PM code for TI EMIF
 *
 * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
 * Dave Gerlach
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/page.h>

#include "emif.h"
#include "ti-emif-asm-offsets.h"

#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES	0x00a0
#define EMIF_POWER_MGMT_SR_TIMER_MASK			0x00f0
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE		0x0200
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK		0x0700
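/*
 * The self-refresh value above is 0x2 shifted into the low-power-mode
 * field of PWR_MGMT_CTRL covered by the 0x0700 mask (bits [10:8]),
 * i.e. it selects self-refresh as the low-power mode.
 */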

#define EMIF_SDCFG_TYPE_DDR2		(0x2 << SDRAM_TYPE_SHIFT)
#define EMIF_SDCFG_TYPE_DDR3		(0x3 << SDRAM_TYPE_SHIFT)
#define EMIF_STATUS_READY		0x4

#define AM43XX_EMIF_PHY_CTRL_REG_COUNT	0x120

#define EMIF_AM437X_REGISTERS		0x1

	.arm
	.align 3
	.arch armv7-a

ENTRY(ti_emif_sram)

/*
 * void ti_emif_save_context(void)
 *
 * Used during suspend to save the context of all required EMIF registers
 * to local memory if the EMIF is going to lose context during the sleep
 * transition. Operates on the VIRTUAL address of the EMIF.
 */
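/*
 * The base-address and save-area pointers used below are read from
 * ti_emif_pm_sram_data, the block reserved at the end of this file, which
 * the EMIF PM driver is expected to populate before these routines run;
 * the field offsets come from ti-emif-asm-offsets.h.
 */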
ENTRY(ti_emif_save_context)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]

	/* Save EMIF configuration */
	ldr	r1, [r0, #EMIF_SDRAM_CONFIG]
	str	r1, [r2, #EMIF_SDCFG_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
	str	r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_1]
	str	r1, [r2, #EMIF_TIMING1_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_2]
	str	r1, [r2, #EMIF_TIMING2_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_3]
	str	r1, [r2, #EMIF_TIMING3_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	str	r1, [r2, #EMIF_PMCR_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
	str	r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
	str	r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_DDR_PHY_CTRL_1]
	str	r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]

	ldr	r1, [r0, #EMIF_COS_CONFIG]
	str	r1, [r2, #EMIF_COS_CONFIG_OFFSET]

	ldr	r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
	str	r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]

	ldr	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
	str	r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]

	ldr	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
	str	r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]

	ldr	r1, [r0, #EMIF_OCP_CONFIG]
	str	r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]

	ldr	r5, [r4, #EMIF_PM_CONFIG_OFFSET]
	cmp	r5, #EMIF_SRAM_AM43_REG_LAYOUT
	bne	emif_skip_save_extra_regs

	ldr	r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
	str	r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]

	ldr	r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
	str	r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]

	ldr	r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
	str	r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]

	ldr	r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
	str	r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]

	ldr	r1, [r0, #EMIF_DLL_CALIB_CTRL]
	str	r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
	str	r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]

	/* Loop and save entire block of emif phy regs */
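	/*
	 * AM43XX_EMIF_PHY_CTRL_REG_COUNT is a byte count: 0x120 / 4 = 72
	 * words, presumably the EXT_PHY_CTRL registers together with their
	 * interleaved shadow registers on AM437x.
	 */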
	mov	r5, #0x0
	add	r4, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
	add	r3, r0, #EMIF_EXT_PHY_CTRL_1
ddr_phy_ctrl_save:
	ldr	r1, [r3, r5]
	str	r1, [r4, r5]
	add	r5, r5, #0x4
	cmp	r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
	bne	ddr_phy_ctrl_save

emif_skip_save_extra_regs:
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_save_context)

/*
 * void ti_emif_restore_context(void)
 *
 * Used during resume to restore the context of all required EMIF registers
 * from local memory after the EMIF has lost context during a sleep transition.
 * Operates on the PHYSICAL address of the EMIF.
 */
ENTRY(ti_emif_restore_context)
	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]

	/* Config EMIF Timings */
	ldr	r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
	str	r1, [r0, #EMIF_DDR_PHY_CTRL_1]
	str	r1, [r0, #EMIF_DDR_PHY_CTRL_1_SHDW]

	ldr	r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_1]
	str	r1, [r0, #EMIF_SDRAM_TIMING_1_SHDW]

	ldr	r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_2]
	str	r1, [r0, #EMIF_SDRAM_TIMING_2_SHDW]

	ldr	r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_3]
	str	r1, [r0, #EMIF_SDRAM_TIMING_3_SHDW]

	ldr	r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
	str	r1, [r0, #EMIF_SDRAM_REFRESH_CTRL_SHDW]

	ldr	r1, [r2, #EMIF_PMCR_VAL_OFFSET]
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	ldr	r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]

	ldr	r1, [r2, #EMIF_COS_CONFIG_OFFSET]
	str	r1, [r0, #EMIF_COS_CONFIG]

	ldr	r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
	str	r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]

	ldr	r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
	str	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]

	ldr	r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
	str	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]

	ldr	r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
	str	r1, [r0, #EMIF_OCP_CONFIG]

	ldr	r5, [r4, #EMIF_PM_CONFIG_OFFSET]
	cmp	r5, #EMIF_SRAM_AM43_REG_LAYOUT
	bne	emif_skip_restore_extra_regs

	ldr	r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
	str	r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]

	ldr	r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
	str	r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]

	ldr	r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
	str	r1, [r0, #EMIF_LPDDR2_NVM_TIMING]

	ldr	r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
	str	r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]

	ldr	r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
	str	r1, [r0, #EMIF_DLL_CALIB_CTRL]

	ldr	r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
	str	r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]

	ldr	r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]

	/* Loop and restore entire block of emif phy regs */
	mov	r5, #0x0
	/*
	 * Load ti_emif_regs_amx3 + EMIF_EXT_PHY_CTRL_VALS_OFFSET for address
	 * to phy register save space
	 */
	add	r3, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
	add	r4, r0, #EMIF_EXT_PHY_CTRL_1
ddr_phy_ctrl_restore:
	ldr	r1, [r3, r5]
	str	r1, [r4, r5]
	add	r5, r5, #0x4
	cmp	r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
	bne	ddr_phy_ctrl_restore

emif_skip_restore_extra_regs:
	/*
	 * Output impedance calibration is needed only for DDR3, but since
	 * its initial state is disabled for DDR2, there is no harm in
	 * restoring the old configuration.
	 */
	ldr	r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]

	/* Write to sdcfg last for DDR2 only */
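	/*
	 * For DDR3, SDRAM_CONFIG is presumably brought back up through the
	 * hardware leveling path (ti_emif_run_hw_leveling) rather than a
	 * raw register write, hence the DDR2-only check below.
	 */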
	ldr	r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
	and	r2, r1, #SDRAM_TYPE_MASK
	cmp	r2, #EMIF_SDCFG_TYPE_DDR2
	streq	r1, [r0, #EMIF_SDRAM_CONFIG]

	mov	pc, lr
ENDPROC(ti_emif_restore_context)

/*
 * void ti_emif_run_hw_leveling(void)
 *
 * Used during resume to run hardware leveling again and restore the
 * configuration of the EMIF PHY, only for DDR3.
 */
ENTRY(ti_emif_run_hw_leveling)
	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]

	ldr	r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
	orr	r3, r3, #RDWRLVLFULL_START
	ldr	r2, [r0, #EMIF_SDRAM_CONFIG]
	and	r2, r2, #SDRAM_TYPE_MASK
	cmp	r2, #EMIF_SDCFG_TYPE_DDR3
	bne	skip_hwlvl

	str	r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]

	/*
	 * If the EMIF registers are touched during the initial stage of the
	 * hardware leveling sequence, the EMIF does not respond and an L3 NOC
	 * timeout error is raised. This is not fatal, but it is avoidable:
	 * this small wait loop gives the condition enough time to clear, even
	 * in the worst case of the CPU running at its maximum speed of 1 GHz.
	 */
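	/*
	 * Rough arithmetic: 0x2000 (8192) iterations of a subs/bne pair at a
	 * few cycles each is on the order of tens of microseconds at 1 GHz.
	 */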
	mov	r2, #0x2000
1:
	subs	r2, r2, #0x1
	bne	1b

	/* Bit clears when operation is complete */
2:	ldr	r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
	tst	r1, #RDWRLVLFULL_START
	bne	2b

skip_hwlvl:
	mov	pc, lr
ENDPROC(ti_emif_run_hw_leveling)

/*
 * void ti_emif_enter_sr(void)
 *
 * Programs the EMIF to tell the SDRAM to enter into self-refresh
 * mode during a sleep transition. Operates on the VIRTUAL address
 * of the EMIF.
 */
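/*
 * The body below is a read-modify-write of PWR_MGMT_CTRL: the low-power-mode
 * field (EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK) is cleared and then set to
 * self-refresh (EMIF_POWER_MGMT_SELF_REFRESH_MODE).
 */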
ENTRY(ti_emif_enter_sr)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]

	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	orr	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_enter_sr)

/*
 * void ti_emif_exit_sr(void)
 *
 * Programs the EMIF to tell the SDRAM to exit self-refresh mode
 * after a sleep transition. Operates on the PHYSICAL address of
 * the EMIF.
 */
ENTRY(ti_emif_exit_sr)
	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]

	/*
	 * Toggle the EMIF to exit self-refresh mode:
	 * if the EMIF lost context, PWR_MGMT_CTRL currently reads 0, so
	 * writing disable (0x0) alone would have no effect; instead, toggle
	 * from self-refresh (0x2) to disable (0x0) here.
	 * If the EMIF did not lose context, nothing breaks: we simply write
	 * the same value (0x2) to the register before writing disable (0x0).
	 */
	ldr	r1, [r2, #EMIF_PMCR_VAL_OFFSET]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	orr	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	/* Wait for EMIF to become ready */
1:	ldr	r1, [r0, #EMIF_STATUS]
	tst	r1, #EMIF_STATUS_READY
	beq	1b

	mov	pc, lr
ENDPROC(ti_emif_exit_sr)

/*
 * void ti_emif_abort_sr(void)
 *
 * Disables self-refresh after a failed transition to a low-power
 * state so the kernel can jump back to DDR and follow the abort path.
 * Operates on the VIRTUAL address of the EMIF.
 */
ENTRY(ti_emif_abort_sr)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]

	ldr	r1, [r2, #EMIF_PMCR_VAL_OFFSET]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	/* Wait for EMIF to become ready */
1:	ldr	r1, [r0, #EMIF_STATUS]
	tst	r1, #EMIF_STATUS_READY
	beq	1b

	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_abort_sr)

	.align 3
ENTRY(ti_emif_pm_sram_data)
	.space	EMIF_PM_DATA_SIZE
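/*
 * Total size of the code and data above; presumably used by the EMIF PM
 * driver to know how many bytes to copy into SRAM.
 */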
ENTRY(ti_emif_sram_sz)
	.word	. - ti_emif_save_context