/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE 32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS 8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES 64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT 16384
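
/*
 * Note that CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE is
 * 8 * 64 * 32 = 16384 bytes, so CACHE_DLIMIT corresponds to the
 * full size of the data cache described by the constants above.
 */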

/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache.  The icache never
 * holds dirty data, so no clean is needed.
 */
ENTRY(v4wt_flush_icache_all)
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        ret     lr
ENDPROC(v4wt_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address space.
 */
ENTRY(v4wt_flush_user_cache_all)
        /* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.  On a write through cache
 * no entries are ever dirty, so this reduces to invalidating both
 * caches.
 */
ENTRY(v4wt_flush_kern_cache_all)
        mov     r2, #VM_EXEC
        mov     ip, #0
__flush_whole_cache:
        tst     r2, #VM_EXEC                    @ executable region?
        mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I cache
        mcr     p15, 0, ip, c7, c6, 0           @ invalidate D cache
        ret     lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
        sub     r3, r1, r0                      @ calculate total size
        cmp     r3, #CACHE_DLIMIT               @ range at least cache sized?
        bhs     __flush_whole_cache

1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        tst     r2, #VM_EXEC
        mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        ret     lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
        /* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
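/*
 * Note: below a write through Dcache memory is always up to date,
 * so coherency only requires invalidating any stale Icache lines.
 */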
ENTRY(v4wt_coherent_user_range)
        bic     r0, r0, #CACHE_DLINESIZE - 1    @ align start to line
1:      mcr     p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0                          @ return success
        ret     lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
        mov     r2, #0
        mcr     p15, 0, r2, c7, c5, 0           @ invalidate I cache
        add     r1, r0, r1                      @ end = addr + size
        /* FALLTHROUGH */

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
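/*
 * Note: on a write through cache lines are never dirty, so the
 * partially covered lines at either end need no write back; a plain
 * invalidate is safe because their contents are already in RAM.
 */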
v4wt_dma_inv_range:
        bic     r0, r0, #CACHE_DLINESIZE - 1    @ align start to line
1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        ret     lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
        .globl  v4wt_dma_flush_range
        .equ    v4wt_dma_flush_range, v4wt_dma_inv_range
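
/*
 * On a write through cache the clean step is a no-op (memory is
 * always up to date), so dma_flush_range reduces to dma_inv_range,
 * hence the alias above.
 */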

/*
 * dma_unmap_area(start, size, dir)
 *
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
        add     r1, r1, r0                      @ end = start + size
        teq     r2, #DMA_TO_DEVICE              @ anything but TO_DEVICE
        bne     v4wt_dma_inv_range              @ needs stale lines discarded
        /* FALLTHROUGH */

/*
 * dma_map_area(start, size, dir)
 *
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v4wt_dma_map_area)
        ret     lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)
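
/*
 * Note: with a write through cache RAM is always up to date, so
 * dma_map_area has nothing to clean; dma_unmap_area above only has
 * to discard lines that may have gone stale while the device owned
 * the buffer.
 */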

        .globl  v4wt_flush_kern_cache_louis
        .equ    v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
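
/*
 * These CPUs have a single cache level, so flushing to the level of
 * unification (louis) is the same as flushing the whole cache, hence
 * the alias above.
 */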

        __INITDATA

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions v4wt