1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core |
4 | * |
5 | * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core. |
6 | * |
7 | * Heavily based on proc-arm926.S and proc-xsc3.S |
8 | */ |
9 | |
10 | #include <linux/linkage.h> |
11 | #include <linux/init.h> |
12 | #include <linux/pgtable.h> |
13 | #include <asm/assembler.h> |
14 | #include <asm/hwcap.h> |
15 | #include <asm/pgtable-hwdef.h> |
16 | #include <asm/page.h> |
17 | #include <asm/ptrace.h> |
18 | #include "proc-macros.S" |
19 | |
/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32
30 | |
/*
 * cpu_mohawk_proc_init()
 *
 * Nothing to initialise on this core; just return to the caller.
 */
ENTRY(cpu_mohawk_proc_init)
	ret	lr
36 | |
/*
 * cpu_mohawk_proc_fin()
 *
 * Prepare the CPU for reset/power-down: turn off the caches by
 * clearing the relevant enable bits in the CP15 control register.
 * Clobbers r0.
 */
ENTRY(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........  (clear I-cache/Z bits)
	bic	r0, r0, #0x0006			@ .............ca.  (clear D-cache/A bits)
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
46 | |
/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * (same as arm926)
 *
 * Placed in .idmap.text because it must run from an identity-mapped
 * address while the MMU is being turned off below.
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam  (caches + MMU off)
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0				@ jump to the reset location
ENDPROC(cpu_mohawk_reset)
	.popsection
72 | |
/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled.  Drains the write buffer and then
 * enters the low-power wait-for-interrupt state.
 */
	.align	5
ENTRY(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	ret	lr
84 | |
/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache.  (The icache holds no
 * dirty data, so "clean" is a no-op; invalidation is sufficient.)
 */
ENTRY(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(mohawk_flush_icache_all)
95 | |
/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 *
 * __flush_whole_cache is also the tail shared with
 * mohawk_flush_user_cache_range for over-limit ranges; it expects
 * r2 = vm_flags (VM_EXEC set if the I-cache must go too) and ip = 0.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	ret	lr
119 | |
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 *
 * (same as arm926)
 *
 * Ranges larger than CACHE_DLIMIT fall back to a whole-cache flush.
 * The loop is unrolled twice (two cache lines per iteration); the
 * mcr/add instructions do not touch the flags, so the single
 * "tst r2, #VM_EXEC" at the loop head conditions both mcrne's.
 */
ENTRY(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
149 | |
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as arm926)
 *
 * Returns 0 in r0 (success) per the coherent_user_range contract.
 */
ENTRY(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
185 | |
/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 *
 * Cleans+invalidates the D lines of the region, then invalidates the
 * whole I-cache (there is no cheap per-range I invalidate path here).
 */
ENTRY(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
205 | |
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back (cleaned) first so that unrelated data sharing the
 * partial line is not lost.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry (partial first line)
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry (partial last line)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
231 | |
/*
 * dma_clean_range(start, end)
 *
 * Clean (write back, without invalidating) the specified
 * virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
250 | |
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
268 | |
/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 *
 * Dispatches to the right maintenance routine for the direction:
 * TO_DEVICE -> clean, FROM_DEVICE -> invalidate, BIDIRECTIONAL -> flush.
 * The tail-called routines expect (start, end), hence r1 += r0.
 */
ENTRY(mohawk_dma_map_area)
	add	r1, r1, r0			@ convert size to end address
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range
	bcs	mohawk_dma_inv_range		@ dir > DMA_TO_DEVICE: FROM_DEVICE
	b	mohawk_dma_flush_range		@ dir < DMA_TO_DEVICE: BIDIRECTIONAL
ENDPROC(mohawk_dma_map_area)
282 | |
/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 *
 * Nothing to do on unmap for this core; all maintenance is done at
 * map time.
 */
ENTRY(mohawk_dma_unmap_area)
	ret	lr
ENDPROC(mohawk_dma_unmap_area)
292 | |
	@ No LoUIS-only variant on this core: flushing to the Level of
	@ Unification Inner Shareable is the same as flushing everything.
	.globl	mohawk_flush_kern_cache_louis
	.equ	mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions mohawk
298 | |
/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean (write back) the D-cache lines covering [addr, addr + size).
 * r0 = start address, r1 = size in bytes.
 */
ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
306 | |
/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables (physical address in r0)
 *
 * The caches are fully cleaned/invalidated first because they are
 * virtually indexed/tagged on this core, so stale entries from the
 * old address space must not survive the switch.
 */
	.align	5
ENTRY(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	lr
324 | |
/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out.
 *
 * r0 = ptep, r1 = pte value, r2 = extension bits (see armv3_set_pte_ext
 * in proc-macros.S, which consumes all three and rewrites r0 to point
 * at the hardware PTE it stored).
 *
 * Fix: the return was previously inside the #ifdef, so a !CONFIG_MMU
 * build produced an empty function that fell through into whatever
 * followed it.  Always return, as the other proc-*.S variants do.
 */
	.align	5
ENTRY(cpu_mohawk_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0				@ pipeline settle after PTE store
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr
339 | |
	.globl	cpu_mohawk_suspend_size
	.equ	cpu_mohawk_suspend_size, 4 * 6	@ six 32-bit registers saved below
#ifdef CONFIG_ARM_CPU_SUSPEND
/*
 * cpu_mohawk_do_suspend(r0 = save area)
 *
 * Save the CP14/CP15 state needed to restore the CPU after a
 * suspend cycle into the cpu_mohawk_suspend_size-byte area at r0.
 */
ENTRY(cpu_mohawk_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
ENDPROC(cpu_mohawk_do_suspend)

/*
 * cpu_mohawk_do_resume(r0 = save area, r1 = page table address)
 *
 * Restore the state saved by cpu_mohawk_do_suspend, reload the
 * translation table base from r1, and tail-call cpu_resume_mmu with
 * the saved control register value in r0 so it can re-enable the MMU.
 * NOTE(review): r1 is presumably the physical page-table address set
 * up by the generic resume path — confirm against cpu_resume callers.
 */
ENTRY(cpu_mohawk_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_mohawk_do_resume)
#endif
374 | |
/*
 * __mohawk_setup()
 *
 * Early boot-time CPU initialisation, called from head.S with the
 * page table physical address in r4.  Invalidates caches and TLBs,
 * loads the translation table base, disables coprocessor access,
 * and computes the control-register value to return in r0 (the
 * caller writes it to CP15 c1).
 */
	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr

	.size	__mohawk_setup, . - __mohawk_setup
395 | |
/*
 * Control-register bit legend for the mmuset value below:
 *     R
 *  .RVI ZFRS BLDP WCAM
 *  .011 1001 ..00 0101
 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134
405 | |
	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort
410 | |
	.section ".rodata"

	@ Name strings referenced from the proc_info record below.
	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_mohawk_name, "Marvell 88SV331x"

	.align
418 | |
	.section ".proc.info.init", "a"

/*
 * CPU match record (struct proc_info_list): the kernel matches the
 * CPU ID read at boot against cpu_val/cpu_mask and, on a hit, uses
 * the section-mapping flags, setup function and function tables below.
 */
	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ Marvell 88SV331x (MOHAWK) CPU id
	.long	0xfffff000			@ CPU id mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ		@ __cpu_mm_mmu_flags
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ		@ __cpu_io_mmu_flags
	initfn	__mohawk_setup, __88sv331x_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	mohawk_cache_fns
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info
445 | |