/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it need *NOT* be
 *  called before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *	  flush_cache_range() being a NOP, except Sparc64 seems to be
 *	  different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
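
/*
 * For orientation, a minimal usage sketch of the lifecycle described above.
 * The pte walk is simplified and illustrative only (ptep would come from a
 * pte_offset_map_lock() walk, elided here); real callers live in mm/memory.c:
 *
 *	struct mmu_gather tlb;
 *	unsigned long addr;
 *
 *	tlb_gather_mmu(&tlb, vma->vm_mm);
 *	tlb_start_vma(&tlb, vma);
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
 *
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	// 1) unhook, track range
 *		if (pte_present(pte))
 *			tlb_remove_page(&tlb, pte_page(pte));	// 3) queue free
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb);	// 2) final invalidate, then free queued pages
 */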

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
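
/*
 * Example: with MMU_GATHER_TABLE_FREE, an architecture whose page-table
 * directories are not plain pages routes its __p*_free_tlb() hooks through
 * tlb_remove_table() and does the actual freeing in __tlb_remove_table().
 * A hedged sketch; the pgd_cachep kmem_cache is a hypothetical stand-in,
 * not taken from any real architecture:
 *
 *	#define __pmd_free_tlb(tlb, pmdp, addr)	tlb_remove_table(tlb, pmdp)
 *
 *	void __tlb_remove_table(void *table)
 *	{
 *		kmem_cache_free(pgd_cachep, table);
 *	}
 */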

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows architectures where the hardware does not walk the Linux
 * page-tables to skip the TLB invalidate when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct encoded_page	*encoded_pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
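
/*
 * For example, with 4K pages and 8-byte pointers, MAX_GATHER_BATCH is
 * (4096 - 16) / 8 = 510 encoded page pointers per batch, so
 * MAX_GATHER_BATCH_COUNT = 10000 / 510 = 19 batches, i.e. at most
 * roughly 10K (19 * 510 = 9690) queued pages per gather.
 */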

extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
				   struct encoded_page *page,
				   int page_size);

#ifdef CONFIG_SMP
/*
 * This both sets 'delayed_rmap', and returns true. It would be an inline
 * function, except we define it before the 'struct mmu_gather'.
 */
#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
#endif

#endif

/*
 * We have a no-op version of the rmap removal that doesn't
 * delay anything. That is used on S390, which flushes remote
 * TLBs synchronously, and on UP, which doesn't have any
 * remote TLBs to flush and is not preemptible due to this
 * all happening under the page table lock.
 */
#ifndef tlb_delay_rmap
#define tlb_delay_rmap(tlb) (false)
static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * Do we have pending delayed rmap removals?
	 */
	unsigned int		delayed_rmap : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB (and, via vma_pfn,
	 * VM_PFNMAP | VM_MIXEDMAP) as seen in tlb_start_vma()
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have an efficient means of range flushing
 * TLBs there is no point in doing intermediate flushes on tlb_end_vma() to
 * keep the range small. We equally don't have to worry about page
 * granularity or other things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */
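
/*
 * Example: an architecture can override the default by defining tlb_flush()
 * in its asm/tlb.h before including this file. A minimal sketch, assuming a
 * hypothetical my_flush_tlb_range() helper; passing mmu_gather::freed_tables
 * lets the arch also invalidate any page-table walk caches:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all)
 *			flush_tlb_mm(tlb->mm);
 *		else if (tlb->end)
 *			my_flush_tlb_range(tlb->mm, tlb->start, tlb->end,
 *					   tlb->freed_tables);
 *	}
 *	#define tlb_flush tlb_flush
 */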

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
		tlb_flush_mmu(tlb);
}

static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page, unsigned int flags)
{
	return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	tlb_remove_table(tlb, pt);
}

/* Like tlb_remove_ptdesc, but for page-like page directories. */
static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	tlb_remove_page(tlb, ptdesc_page(pt));
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
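
/*
 * E.g. when only cleared_pmds is set, tlb_get_unmap_shift() returns
 * PMD_SHIFT and tlb_get_unmap_size() yields 1UL << PMD_SHIFT (2M with
 * 4K base pages on x86-64), so a level-aware tlb_flush() can use
 * PMD-granule invalidates for the whole range.
 */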

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs after
	 * all. Force flush TLBs for such ranges to avoid munmap() vs
	 * unmap_mapping_range() races.
	 */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
		/*
		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
		 * the ranges growing with the unused space between consecutive VMAs.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it.  This
 * is definitely how x86 works, for example.  And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif
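
/*
 * Example: an architecture with page-backed pte pages typically wires
 * __pte_free_tlb() into the batching above. A minimal sketch assuming
 * pgtable_t is a struct page pointer with an attached ptdesc; the dtor
 * pairing mirrors common arch code but is illustrative only:
 *
 *	#define __pte_free_tlb(tlb, pte, address)			\
 *	do {								\
 *		pagetable_pte_dtor(page_ptdesc(pte));			\
 *		tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));	\
 *	} while (0)
 */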

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */