// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

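/*
 * arm_heavy_mb() is the heavyweight barrier behind mb()/wmb() when
 * CONFIG_ARM_HEAVY_MB is enabled (see __arm_heavy_mb() in
 * <asm/barrier.h>): it syncs any outer cache (e.g. a PL310 L2) and then
 * runs an optional SoC-specific hook.  A platform that needs extra
 * interconnect synchronisation installs the hook from its init code,
 * roughly like this (illustrative sketch, not from any particular
 * platform):
 *
 *	static void my_soc_sync(void) { ... }
 *	...
 *	soc_mb = my_soc_sync;
 */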
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

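/*
 * On an aliasing VIPT D-cache, one physical page can live at several
 * distinct cache "colours": the colour is the part of the virtual
 * address that indexes the cache beyond the page offset.  As a worked
 * example (illustrative numbers): a 32KB 4-way set-associative cache
 * has 8KB per way, so virtual bit 12 takes part in the index and a 4KB
 * page has two possible colours.  CACHE_COLOUR(vaddr) extracts these
 * bits, and the helpers below map a kernel alias of the page at the
 * matching colour inside the dedicated FLUSH_ALIAS_START window, so
 * that cache maintenance hits the same cache lines as the userspace
 * mapping.
 */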
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	/*
	 * Clean and invalidate the D-cache over the alias range (MCRR
	 * c14 range operation), then drain the write buffer (c7, c10, 4).
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

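/*
 * Like flush_pfn_alias(), but for making newly written instructions
 * visible: the page is mapped at the matching colour and pushed through
 * flush_icache_range(), which cleans the D-cache and invalidates the
 * I-cache over just the affected range.
 */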
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean and invalidate the entire D-cache, then drain
		 * the write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_pages(vma, user_addr, pfn, nr);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

/* Remote helper for smp_call_function(): invalidate this CPU's I-cache. */
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other, NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

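/*
 * Used by the uprobes core after it copies a probed instruction into an
 * execute-out-of-line (XOL) slot (on ARM, from arch_uprobe_copy_ixol()).
 * The XOL page belongs to the current mm and is about to be executed,
 * so both flags can be set unconditionally.
 */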
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
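
/*
 * Illustrative caller, simplified from the generic access_remote_vm()
 * path in mm/memory.c (helper names abridged):
 *
 *	page = get_user_pages_remote(mm, addr, ...);
 *	maddr = kmap_local_page(page);
 *	copy_to_user_page(vma, page, addr,
 *			  maddr + offset_in_page(addr), buf, len);
 *	kunmap_local(maddr);
 *
 * The memcpy() through the kernel alias plus flush_ptrace_access()
 * keep the target process's view of the page - including its I-cache,
 * for VM_EXEC mappings - coherent with the newly written data.
 */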

void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!folio_test_highmem(folio)) {
		__cpuc_flush_dcache_area(folio_address(folio),
					 folio_size(folio));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_local_folio(folio,
							      i * PAGE_SIZE);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_local(addr);
			}
		} else {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				/*
				 * kmap_high_get() returns the page's
				 * existing pinned kernel mapping, or NULL
				 * if it has none; without a persistent
				 * kernel mapping there is nothing to
				 * write back here.
				 */
				void *addr = kmap_high_get(folio_page(folio, i));
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(folio_page(folio, i));
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}

static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	pgoff_t pgoff, pgoff_end;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = folio->index;
	pgoff_end = pgoff + folio_nr_pages(folio) - 1;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
		unsigned long start, offset, pfn;
		unsigned int nr;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

		start = vma->vm_start;
		pfn = folio_pfn(folio);
		nr = folio_nr_pages(folio);
		offset = pgoff - vma->vm_pgoff;
		if (offset > -nr) {
			/* VMA starts inside the folio: skip the head pages. */
			pfn -= offset;
			nr += offset;
		} else {
			/* Folio starts inside the VMA: advance the user address. */
			start += offset * PAGE_SIZE;
		}
		/* Clamp the flush to the end of the VMA. */
		if (start + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - start) / PAGE_SIZE;

		flush_cache_pages(vma, start, pfn, nr);
	}
	flush_dcache_mmap_unlock(mapping);
}
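
/*
 * Worked example for the arithmetic above, with illustrative values:
 * for a 4-page folio at pgoff 8 and a VMA with vm_pgoff 10, offset
 * computes to -2 (wrapped, since the variables are unsigned), so the
 * first branch advances pfn by two pages and shrinks nr to 2 - only
 * the tail of the folio that the VMA maps is flushed, starting at
 * vm_start.  With vm_pgoff 6 instead, offset is +2 and the second
 * branch flushes all four pages starting at vm_start + 2 * PAGE_SIZE,
 * subject to the final clamp against vm_end.
 */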

#if __LINUX_ARM_ARCH__ >= 6
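/*
 * Called when a pte for a user mapping is installed; on ARMv6+ this is
 * wired up from the set_ptes()/update_mmu_cache() path (see
 * <asm/pgtable.h>).  Together with PG_dcache_clean it implements the
 * lazy flush described in the comment above flush_dcache_folio(): the
 * D-cache is written back and the I-cache invalidated only once a
 * possibly-dirty page is actually mapped into userspace.
 */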
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct folio *folio;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (folio_test_reserved(folio))
		return;

	if (cache_is_vipt_aliasing())
		mapping = folio_flush_mapping(folio);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &folio->flags))
			clear_bit(PG_dcache_clean, &folio->flags);
		return;
	}

	mapping = folio_flush_mapping(folio);

	if (!cache_ops_need_broadcast() &&
	    mapping && !folio_mapped(folio))
		clear_bit(PG_dcache_clean, &folio->flags);
	else {
		__flush_dcache_folio(mapping, folio);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, folio);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
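
/*
 * Illustrative use (a sketch, not taken from a specific driver): after
 * the kernel writes to a page-cache page that may also be mapped into
 * userspace, flush_dcache_page() makes the new data visible through
 * the userspace alias:
 *
 *	void *kaddr = kmap_local_page(page);
 *	memcpy(kaddr, data, len);
 *	kunmap_local(kaddr);
 *	flush_dcache_page(page);
 */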

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}